/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include <css_def.h>

#include "../mhu/css_mhu.h"
#include "css_scpi.h"

#define SCPI_SHARED_MEM_SCP_TO_AP	PLAT_CSS_SCP_COM_SHARED_MEM_BASE
#define SCPI_SHARED_MEM_AP_TO_SCP	(PLAT_CSS_SCP_COM_SHARED_MEM_BASE \
						+ 0x100)

/* Header and payload addresses for commands from AP to SCP */
#define SCPI_CMD_HEADER_AP_TO_SCP		\
	((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
#define SCPI_CMD_PAYLOAD_AP_TO_SCP		\
	((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))

/* Header and payload addresses for responses from SCP to AP */
#define SCPI_RES_HEADER_SCP_TO_AP \
	((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
#define SCPI_RES_PAYLOAD_SCP_TO_AP \
	((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))
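
/*
 * Sketch of the shared-memory framing implied by the macros above (for
 * reference only; the scpi_cmd_t definition itself lives in css_scpi.h):
 *
 *	offset 0		sizeof(scpi_cmd_t)
 *	+-----------------------+---------------------------------+
 *	| scpi_cmd_t header	| payload (command-specific data) |
 *	+-----------------------+---------------------------------+
 *
 * Both directions use the same framing; the AP-to-SCP area simply starts
 * 0x100 bytes above the SCP-to-AP area within the platform's shared memory.
 */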

/* ID of the MHU slot used for the SCPI protocol */
#define SCPI_MHU_SLOT_ID		0

static void scpi_secure_message_start(void)
{
	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
}

static void scpi_secure_message_send(size_t payload_size)
{
	/*
	 * Ensure that any write to the SCPI payload area is seen by the SCP
	 * before we write to the MHU register. If these two writes were
	 * reordered by the CPU then the SCP would read stale payload data.
	 */
	dmbst();

	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
}

static void scpi_secure_message_receive(scpi_cmd_t *cmd)
{
	uint32_t mhu_status;

	assert(cmd != NULL);

	mhu_status = mhu_secure_message_wait();

	/* Expect an SCPI message, reject any other protocol */
	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
			mhu_status);
		panic();
	}

	/*
	 * Ensure that any read from the SCPI payload area is done after
	 * reading the MHU register. If these two reads were reordered then the
	 * CPU would read invalid payload data.
	 */
	dmbld();

	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
}

static void scpi_secure_message_end(void)
{
	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
}

int scpi_wait_ready(void)
{
	scpi_cmd_t scpi_cmd;

	VERBOSE("Waiting for SCP_READY command...\n");

	/* Get a message from the SCP */
	scpi_secure_message_start();
	scpi_secure_message_receive(&scpi_cmd);
	scpi_secure_message_end();

	/* We are expecting 'SCP Ready'; produce the correct error if it's not */
	scpi_status_t status = SCP_OK;
	if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
		ERROR("Unexpected SCP command: expected command #%u, got command #%u\n",
			SCPI_CMD_SCP_READY, scpi_cmd.id);
		status = SCP_E_SUPPORT;
	} else if (scpi_cmd.size != 0) {
		ERROR("SCP_READY command has incorrect size: expected 0, got %u\n",
			scpi_cmd.size);
		status = SCP_E_SIZE;
	}

	VERBOSE("Sending response for SCP_READY command\n");

	/*
	 * Send our response back to the SCP.
	 * We are using the same SCPI header, just update the status field.
	 */
	scpi_cmd.status = status;
	scpi_secure_message_start();
	memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
	scpi_secure_message_send(0);
	scpi_secure_message_end();

	return status == SCP_OK ? 0 : -1;
}
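
/*
 * Usage sketch (illustrative, not part of the driver): a platform boot stage
 * that has just handed an image to the SCP would typically block here until
 * the SCP announces it is ready, e.g.:
 *
 *	if (scpi_wait_ready() != 0) {
 *		ERROR("SCP did not report ready\n");
 *		panic();
 *	}
 *
 * A zero return means the SCP_READY handshake completed and the response was
 * sent back; -1 means the SCP sent an unexpected command or size.
 */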

void scpi_set_css_power_state(unsigned int mpidr,
		scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
		scpi_power_state_t css_state)
{
	scpi_cmd_t *cmd;
	uint32_t state = 0;
	uint32_t *payload_addr;

#if ARM_PLAT_MT
	/*
	 * The current SCPI driver only caters for single-threaded platforms.
	 * Hence we ignore the thread ID (which is always 0) for such platforms.
	 */
	state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f;		/* CPU ID */
	state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4;	/* Cluster ID */
#else
	state |= mpidr & 0x0f;		/* CPU ID */
	state |= (mpidr & 0xf00) >> 4;	/* Cluster ID */
#endif /* ARM_PLAT_MT */

	state |= cpu_state << 8;
	state |= cluster_state << 12;
	state |= css_state << 16;

	scpi_secure_message_start();

	/* Populate the command header */
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_SET_CSS_POWER_STATE;
	cmd->set = SCPI_SET_NORMAL;
	cmd->sender = 0;
	cmd->size = sizeof(state);
	/* Populate the command payload */
	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
	*payload_addr = state;
	scpi_secure_message_send(sizeof(state));
	/*
	 * SCP does not reply to this command in order to avoid MHU interrupts
	 * from the sender, which could interfere with its power state request.
	 */

	scpi_secure_message_end();
}
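
/*
 * Usage sketch (illustrative): a PSCI CPU_OFF hook might ask the SCP to power
 * down the calling CPU while keeping its cluster and the CSS on, assuming the
 * scpi_power_off/scpi_power_on enumerators declared in css_scpi.h:
 *
 *	scpi_set_css_power_state(read_mpidr(), scpi_power_off,
 *				 scpi_power_on, scpi_power_on);
 *
 * No response is read back, in line with the comment above: the SCP does not
 * reply to SCPI_CMD_SET_CSS_POWER_STATE.
 */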

/*
 * Query and obtain CSS power state from SCP.
 *
 * In response to the query, SCP returns power states of all CPUs in all
 * clusters of the system. The returned response is then filtered based on the
 * supplied MPIDR. Power states of the requested cluster and the CPUs within it
 * are updated via the supplied non-NULL pointer arguments.
 *
 * Returns 0 on success, or -1 on errors.
 */
int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
		unsigned int *cluster_state_p)
{
	scpi_cmd_t *cmd;
	scpi_cmd_t response;
	int power_state, cpu, cluster, rc = -1;

	/*
	 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
	 * for only up to 0xf clusters, and 8 CPUs per cluster.
	 */
#if ARM_PLAT_MT
	/*
	 * The current SCPI driver only caters for single-threaded platforms.
	 * Hence we ignore the thread ID (which is always 0) for such platforms.
	 */
	cpu = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
#else
	cpu = mpidr & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
#endif /* ARM_PLAT_MT */
	if (cpu >= 8 || cluster >= 0xf)
		return -1;

	scpi_secure_message_start();

	/* Populate request headers */
	zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_GET_CSS_POWER_STATE;

	/*
	 * Send message and wait for SCP's response
	 */
	scpi_secure_message_send(0);
	scpi_secure_message_receive(&response);

	if (response.status != SCP_OK)
		goto exit;

	/* Validate SCP response */
	if (!CHECK_RESPONSE(response, cluster))
		goto exit;

	/* Extract power states for the required cluster */
	power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
	if (CLUSTER_ID(power_state) != cluster)
		goto exit;

	/* Update power states via pointers */
	if (cluster_state_p)
		*cluster_state_p = CLUSTER_POWER_STATE(power_state);
	if (cpu_state_p)
		*cpu_state_p = CPU_POWER_STATE(power_state);
	rc = 0;

exit:
	scpi_secure_message_end();
	return rc;
}
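
/*
 * Usage sketch (illustrative): query the SCP for the state of the calling
 * CPU's cluster and of the CPU itself, and only act on the outputs when the
 * call succeeds:
 *
 *	unsigned int cpu_state, cluster_state;
 *
 *	if (scpi_get_css_power_state(read_mpidr(), &cpu_state,
 *				     &cluster_state) != 0)
 *		return -1;	// or any caller-chosen error code
 *
 * Either pointer may be NULL if the caller is not interested in that level,
 * since the function only writes through non-NULL pointers.
 */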

uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
{
	scpi_cmd_t *cmd;
	uint8_t *payload_addr;
	scpi_cmd_t response;

	scpi_secure_message_start();

	/* Populate the command header */
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_SYS_POWER_STATE;
	cmd->set = 0;
	cmd->sender = 0;
	cmd->size = sizeof(*payload_addr);
	/* Populate the command payload */
	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
	*payload_addr = system_state & 0xff;
	scpi_secure_message_send(sizeof(*payload_addr));

	scpi_secure_message_receive(&response);

	scpi_secure_message_end();

	return response.status;
}
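
/*
 * Usage sketch (illustrative): a PSCI SYSTEM_OFF handler could request a
 * system-wide shutdown and treat anything other than SCP_OK as a failure,
 * assuming the scpi_system_shutdown enumerator from css_scpi.h:
 *
 *	uint32_t response = scpi_sys_power_state(scpi_system_shutdown);
 *
 *	if (response != SCP_OK)
 *		ERROR("SCP shutdown request failed: %u\n", response);
 *
 * Unlike scpi_set_css_power_state(), this command does receive a reply, and
 * the SCPI status from that reply is what the function returns.
 */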