Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 1 | /* |
Samarth Parikh | 59cfa13 | 2017-11-23 14:23:21 +0530 | [diff] [blame] | 2 | * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved. |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 3 | * |
dp-arm | fa3cf0b | 2017-05-03 09:38:09 +0100 | [diff] [blame] | 4 | * SPDX-License-Identifier: BSD-3-Clause |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 5 | */ |
| 6 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 7 | #include <assert.h> |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 8 | #include <string.h> |
Antonio Nino Diaz | e0f9063 | 2018-12-14 00:18:21 +0000 | [diff] [blame] | 9 | |
| 10 | #include <arch_helpers.h> |
| 11 | #include <common/debug.h> |
| 12 | #include <lib/utils.h> |
| 13 | #include <plat/common/platform.h> |
Antonio Nino Diaz | a320ecd | 2019-01-15 14:19:50 +0000 | [diff] [blame] | 14 | #include <platform_def.h> |
Antonio Nino Diaz | e0f9063 | 2018-12-14 00:18:21 +0000 | [diff] [blame] | 15 | |
Samarth Parikh | 59cfa13 | 2017-11-23 14:23:21 +0530 | [diff] [blame] | 16 | #include "../mhu/css_mhu.h" |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 17 | #include "css_scpi.h" |
| 18 | |
Vikram Kanigiri | 7208419 | 2016-02-08 16:29:30 +0000 | [diff] [blame] | 19 | #define SCPI_SHARED_MEM_SCP_TO_AP PLAT_CSS_SCP_COM_SHARED_MEM_BASE |
| 20 | #define SCPI_SHARED_MEM_AP_TO_SCP (PLAT_CSS_SCP_COM_SHARED_MEM_BASE \ |
| 21 | + 0x100) |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 22 | |
Jeenu Viswambharan | b1f6809 | 2016-08-04 12:44:52 +0100 | [diff] [blame] | 23 | /* Header and payload addresses for commands from AP to SCP */ |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 24 | #define SCPI_CMD_HEADER_AP_TO_SCP \ |
| 25 | ((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP) |
| 26 | #define SCPI_CMD_PAYLOAD_AP_TO_SCP \ |
| 27 | ((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t))) |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 28 | |
Jeenu Viswambharan | b1f6809 | 2016-08-04 12:44:52 +0100 | [diff] [blame] | 29 | /* Header and payload addresses for responses from SCP to AP */ |
| 30 | #define SCPI_RES_HEADER_SCP_TO_AP \ |
| 31 | ((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP) |
| 32 | #define SCPI_RES_PAYLOAD_SCP_TO_AP \ |
| 33 | ((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t))) |
| 34 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 35 | /* ID of the MHU slot used for the SCPI protocol */ |
| 36 | #define SCPI_MHU_SLOT_ID 0 |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 37 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 38 | static void scpi_secure_message_start(void) |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 39 | { |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 40 | mhu_secure_message_start(SCPI_MHU_SLOT_ID); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 41 | } |
| 42 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 43 | static void scpi_secure_message_send(size_t payload_size) |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 44 | { |
Soby Mathew | 200fffd | 2016-10-21 11:34:59 +0100 | [diff] [blame] | 45 | /* |
| 46 | * Ensure that any write to the SCPI payload area is seen by SCP before |
Juan Castillo | 2e86cb1 | 2016-01-13 15:01:09 +0000 | [diff] [blame] | 47 | * we write to the MHU register. If these 2 writes were reordered by |
Soby Mathew | 200fffd | 2016-10-21 11:34:59 +0100 | [diff] [blame] | 48 | * the CPU then SCP would read stale payload data |
| 49 | */ |
Juan Castillo | 2e86cb1 | 2016-01-13 15:01:09 +0000 | [diff] [blame] | 50 | dmbst(); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 51 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 52 | mhu_secure_message_send(SCPI_MHU_SLOT_ID); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 53 | } |
| 54 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 55 | static void scpi_secure_message_receive(scpi_cmd_t *cmd) |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 56 | { |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 57 | uint32_t mhu_status; |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 58 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 59 | assert(cmd != NULL); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 60 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 61 | mhu_status = mhu_secure_message_wait(); |
| 62 | |
| 63 | /* Expect an SCPI message, reject any other protocol */ |
| 64 | if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) { |
| 65 | ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n", |
| 66 | mhu_status); |
| 67 | panic(); |
| 68 | } |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 69 | |
Soby Mathew | 200fffd | 2016-10-21 11:34:59 +0100 | [diff] [blame] | 70 | /* |
| 71 | * Ensure that any read to the SCPI payload area is done after reading |
Juan Castillo | 2e86cb1 | 2016-01-13 15:01:09 +0000 | [diff] [blame] | 72 | * the MHU register. If these 2 reads were reordered then the CPU would |
Soby Mathew | 200fffd | 2016-10-21 11:34:59 +0100 | [diff] [blame] | 73 | * read invalid payload data |
| 74 | */ |
Juan Castillo | 2e86cb1 | 2016-01-13 15:01:09 +0000 | [diff] [blame] | 75 | dmbld(); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 76 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 77 | memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd)); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 78 | } |
| 79 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 80 | static void scpi_secure_message_end(void) |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 81 | { |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 82 | mhu_secure_message_end(SCPI_MHU_SLOT_ID); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 83 | } |
| 84 | |
| 85 | int scpi_wait_ready(void) |
| 86 | { |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 87 | scpi_cmd_t scpi_cmd; |
| 88 | |
| 89 | VERBOSE("Waiting for SCP_READY command...\n"); |
| 90 | |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 91 | /* Get a message from the SCP */ |
| 92 | scpi_secure_message_start(); |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 93 | scpi_secure_message_receive(&scpi_cmd); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 94 | scpi_secure_message_end(); |
| 95 | |
| 96 | /* We are expecting 'SCP Ready', produce correct error if it's not */ |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 97 | scpi_status_t status = SCP_OK; |
| 98 | if (scpi_cmd.id != SCPI_CMD_SCP_READY) { |
| 99 | ERROR("Unexpected SCP command: expected command #%u, got command #%u\n", |
| 100 | SCPI_CMD_SCP_READY, scpi_cmd.id); |
| 101 | status = SCP_E_SUPPORT; |
| 102 | } else if (scpi_cmd.size != 0) { |
| 103 | ERROR("SCP_READY command has incorrect size: expected 0, got %u\n", |
| 104 | scpi_cmd.size); |
| 105 | status = SCP_E_SIZE; |
| 106 | } |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 107 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 108 | VERBOSE("Sending response for SCP_READY command\n"); |
| 109 | |
| 110 | /* |
| 111 | * Send our response back to SCP. |
| 112 | * We are using the same SCPI header, just update the status field. |
| 113 | */ |
| 114 | scpi_cmd.status = status; |
| 115 | scpi_secure_message_start(); |
| 116 | memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd)); |
| 117 | scpi_secure_message_send(0); |
| 118 | scpi_secure_message_end(); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 119 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 120 | return status == SCP_OK ? 0 : -1; |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 121 | } |
| 122 | |
Soby Mathew | 200fffd | 2016-10-21 11:34:59 +0100 | [diff] [blame] | 123 | void scpi_set_css_power_state(unsigned int mpidr, |
| 124 | scpi_power_state_t cpu_state, scpi_power_state_t cluster_state, |
| 125 | scpi_power_state_t css_state) |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 126 | { |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 127 | scpi_cmd_t *cmd; |
| 128 | uint32_t state = 0; |
| 129 | uint32_t *payload_addr; |
| 130 | |
Summer Qin | 93c812f | 2017-02-28 16:46:17 +0000 | [diff] [blame] | 131 | #if ARM_PLAT_MT |
| 132 | /* |
| 133 | * The current SCPI driver only caters for single-threaded platforms. |
| 134 | * Hence we ignore the thread ID (which is always 0) for such platforms. |
| 135 | */ |
| 136 | state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f; /* CPU ID */ |
| 137 | state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4; /* Cluster ID */ |
| 138 | #else |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 139 | state |= mpidr & 0x0f; /* CPU ID */ |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 140 | state |= (mpidr & 0xf00) >> 4; /* Cluster ID */ |
Summer Qin | 93c812f | 2017-02-28 16:46:17 +0000 | [diff] [blame] | 141 | #endif /* ARM_PLAT_MT */ |
| 142 | |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 143 | state |= cpu_state << 8; |
| 144 | state |= cluster_state << 12; |
| 145 | state |= css_state << 16; |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 146 | |
| 147 | scpi_secure_message_start(); |
| 148 | |
| 149 | /* Populate the command header */ |
| 150 | cmd = SCPI_CMD_HEADER_AP_TO_SCP; |
| 151 | cmd->id = SCPI_CMD_SET_CSS_POWER_STATE; |
| 152 | cmd->set = SCPI_SET_NORMAL; |
| 153 | cmd->sender = 0; |
| 154 | cmd->size = sizeof(state); |
| 155 | /* Populate the command payload */ |
| 156 | payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP; |
| 157 | *payload_addr = state; |
| 158 | scpi_secure_message_send(sizeof(state)); |
| 159 | /* |
| 160 | * SCP does not reply to this command in order to avoid MHU interrupts |
| 161 | * from the sender, which could interfere with its power state request. |
| 162 | */ |
| 163 | |
Jeenu Viswambharan | b1f6809 | 2016-08-04 12:44:52 +0100 | [diff] [blame] | 164 | scpi_secure_message_end(); |
| 165 | } |
| 166 | |
| 167 | /* |
| 168 | * Query and obtain CSS power state from SCP. |
| 169 | * |
| 170 | * In response to the query, SCP returns power states of all CPUs in all |
| 171 | * clusters of the system. The returned response is then filtered based on the |
| 172 | * supplied MPIDR. Power states of requested cluster and CPUs within are updated |
| 173 | * via. supplied non-NULL pointer arguments. |
| 174 | * |
| 175 | * Returns 0 on success, or -1 on errors. |
| 176 | */ |
int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
		unsigned int *cluster_state_p)
{
	scpi_cmd_t *cmd;
	scpi_cmd_t response;
	int power_state, cpu, cluster, rc = -1;

	/*
	 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
	 * for only up to 0xf clusters, and 8 CPUs per cluster
	 */
#if ARM_PLAT_MT
	/*
	 * The current SCPI driver only caters for single-threaded platforms.
	 * Hence we ignore the thread ID (which is always 0) for such platforms.
	 */
	cpu = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
#else
	cpu = mpidr & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
#endif /* ARM_PLAT_MT */
	/* Reject topology positions outside what the SCPI protocol can carry */
	if (cpu >= 8 || cluster >= 0xf)
		return -1;

	scpi_secure_message_start();

	/* Populate request headers */
	/* Zero the whole header first so unset fields (set/sender/size) are 0 */
	zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_GET_CSS_POWER_STATE;

	/*
	 * Send message and wait for SCP's response
	 */
	scpi_secure_message_send(0);
	scpi_secure_message_receive(&response);

	if (response.status != SCP_OK)
		goto exit;

	/* Validate SCP response */
	/* NOTE(review): CHECK_RESPONSE presumably checks the response size
	 * covers an entry for 'cluster' — confirm against css_scpi.h */
	if (!CHECK_RESPONSE(response, cluster))
		goto exit;

	/* Extract power states for required cluster */
	/* The payload is read as an array of 16-bit per-cluster entries */
	power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
	if (CLUSTER_ID(power_state) != cluster)
		goto exit;

	/* Update power state via. pointers */
	if (cluster_state_p)
		*cluster_state_p = CLUSTER_POWER_STATE(power_state);
	if (cpu_state_p)
		*cpu_state_p = CPU_POWER_STATE(power_state);
	rc = 0;

exit:
	/* Single exit point ensures the MHU transaction is always closed */
	scpi_secure_message_end();
	return rc;
}
| 238 | |
| 239 | uint32_t scpi_sys_power_state(scpi_system_state_t system_state) |
| 240 | { |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 241 | scpi_cmd_t *cmd; |
| 242 | uint8_t *payload_addr; |
| 243 | scpi_cmd_t response; |
| 244 | |
| 245 | scpi_secure_message_start(); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 246 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 247 | /* Populate the command header */ |
| 248 | cmd = SCPI_CMD_HEADER_AP_TO_SCP; |
| 249 | cmd->id = SCPI_CMD_SYS_POWER_STATE; |
| 250 | cmd->set = 0; |
| 251 | cmd->sender = 0; |
| 252 | cmd->size = sizeof(*payload_addr); |
| 253 | /* Populate the command payload */ |
| 254 | payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP; |
| 255 | *payload_addr = system_state & 0xff; |
| 256 | scpi_secure_message_send(sizeof(*payload_addr)); |
| 257 | |
| 258 | scpi_secure_message_receive(&response); |
| 259 | |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 260 | scpi_secure_message_end(); |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 261 | |
| 262 | return response.status; |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 263 | } |