Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 1 | /* |
Samarth Parikh | 59cfa13 | 2017-11-23 14:23:21 +0530 | [diff] [blame] | 2 | * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved. |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 3 | * |
dp-arm | fa3cf0b | 2017-05-03 09:38:09 +0100 | [diff] [blame] | 4 | * SPDX-License-Identifier: BSD-3-Clause |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 5 | */ |
| 6 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 7 | #include <assert.h> |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 8 | #include <string.h> |
Antonio Nino Diaz | e0f9063 | 2018-12-14 00:18:21 +0000 | [diff] [blame] | 9 | |
| 10 | #include <arch_helpers.h> |
| 11 | #include <common/debug.h> |
Antonio Nino Diaz | 1b0c6f1 | 2019-01-23 21:08:43 +0000 | [diff] [blame] | 12 | #include <drivers/arm/css/css_mhu.h> |
Antonio Nino Diaz | ae9654d | 2019-01-25 14:23:49 +0000 | [diff] [blame] | 13 | #include <drivers/arm/css/css_scpi.h> |
Antonio Nino Diaz | e0f9063 | 2018-12-14 00:18:21 +0000 | [diff] [blame] | 14 | #include <lib/utils.h> |
| 15 | #include <plat/common/platform.h> |
Antonio Nino Diaz | a320ecd | 2019-01-15 14:19:50 +0000 | [diff] [blame] | 16 | #include <platform_def.h> |
Antonio Nino Diaz | e0f9063 | 2018-12-14 00:18:21 +0000 | [diff] [blame] | 17 | |
Vikram Kanigiri | 7208419 | 2016-02-08 16:29:30 +0000 | [diff] [blame] | 18 | #define SCPI_SHARED_MEM_SCP_TO_AP PLAT_CSS_SCP_COM_SHARED_MEM_BASE |
| 19 | #define SCPI_SHARED_MEM_AP_TO_SCP (PLAT_CSS_SCP_COM_SHARED_MEM_BASE \ |
| 20 | + 0x100) |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 21 | |
Jeenu Viswambharan | b1f6809 | 2016-08-04 12:44:52 +0100 | [diff] [blame] | 22 | /* Header and payload addresses for commands from AP to SCP */ |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 23 | #define SCPI_CMD_HEADER_AP_TO_SCP \ |
| 24 | ((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP) |
| 25 | #define SCPI_CMD_PAYLOAD_AP_TO_SCP \ |
| 26 | ((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t))) |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 27 | |
Jeenu Viswambharan | b1f6809 | 2016-08-04 12:44:52 +0100 | [diff] [blame] | 28 | /* Header and payload addresses for responses from SCP to AP */ |
| 29 | #define SCPI_RES_HEADER_SCP_TO_AP \ |
| 30 | ((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP) |
| 31 | #define SCPI_RES_PAYLOAD_SCP_TO_AP \ |
| 32 | ((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t))) |
| 33 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 34 | /* ID of the MHU slot used for the SCPI protocol */ |
| 35 | #define SCPI_MHU_SLOT_ID 0 |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 36 | |
/*
 * Acquire the SCPI MHU slot before composing a command in the AP-to-SCP
 * shared memory area. Must be paired with scpi_secure_message_end().
 */
static void scpi_secure_message_start(void)
{
	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
}
| 41 | |
/*
 * Notify SCP that a command (with a payload of payload_size bytes) has been
 * populated in the AP-to-SCP shared memory area.
 *
 * Note: payload_size is currently not read by this implementation; the MHU
 * doorbell carries no length information.
 */
static void scpi_secure_message_send(size_t payload_size)
{
	/*
	 * Ensure that any write to the SCPI payload area is seen by SCP before
	 * we write to the MHU register. If these 2 writes were reordered by
	 * the CPU then SCP would read stale payload data
	 */
	dmbst();

	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
}
| 53 | |
/*
 * Wait for a message from SCP and copy its header into *cmd.
 *
 * Blocks on the MHU until a message arrives, panics if the message did not
 * arrive on the SCPI slot, then copies the command header out of the
 * SCP-to-AP shared memory area.
 */
static void scpi_secure_message_receive(scpi_cmd_t *cmd)
{
	uint32_t mhu_status;

	assert(cmd != NULL);

	mhu_status = mhu_secure_message_wait();

	/* Expect an SCPI message, reject any other protocol */
	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
			mhu_status);
		panic();
	}

	/*
	 * Ensure that any read to the SCPI payload area is done after reading
	 * the MHU register. If these 2 reads were reordered then the CPU would
	 * read invalid payload data
	 */
	dmbld();

	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
}
| 78 | |
/*
 * Release the SCPI MHU slot once the shared-memory transaction is complete.
 * Pairs with scpi_secure_message_start().
 */
static void scpi_secure_message_end(void)
{
	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
}
| 83 | |
| 84 | int scpi_wait_ready(void) |
| 85 | { |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 86 | scpi_cmd_t scpi_cmd; |
| 87 | |
| 88 | VERBOSE("Waiting for SCP_READY command...\n"); |
| 89 | |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 90 | /* Get a message from the SCP */ |
| 91 | scpi_secure_message_start(); |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 92 | scpi_secure_message_receive(&scpi_cmd); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 93 | scpi_secure_message_end(); |
| 94 | |
| 95 | /* We are expecting 'SCP Ready', produce correct error if it's not */ |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 96 | scpi_status_t status = SCP_OK; |
| 97 | if (scpi_cmd.id != SCPI_CMD_SCP_READY) { |
| 98 | ERROR("Unexpected SCP command: expected command #%u, got command #%u\n", |
| 99 | SCPI_CMD_SCP_READY, scpi_cmd.id); |
| 100 | status = SCP_E_SUPPORT; |
| 101 | } else if (scpi_cmd.size != 0) { |
| 102 | ERROR("SCP_READY command has incorrect size: expected 0, got %u\n", |
| 103 | scpi_cmd.size); |
| 104 | status = SCP_E_SIZE; |
| 105 | } |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 106 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 107 | VERBOSE("Sending response for SCP_READY command\n"); |
| 108 | |
| 109 | /* |
| 110 | * Send our response back to SCP. |
| 111 | * We are using the same SCPI header, just update the status field. |
| 112 | */ |
| 113 | scpi_cmd.status = status; |
| 114 | scpi_secure_message_start(); |
| 115 | memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd)); |
| 116 | scpi_secure_message_send(0); |
| 117 | scpi_secure_message_end(); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 118 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 119 | return status == SCP_OK ? 0 : -1; |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 120 | } |
| 121 | |
Soby Mathew | 200fffd | 2016-10-21 11:34:59 +0100 | [diff] [blame] | 122 | void scpi_set_css_power_state(unsigned int mpidr, |
| 123 | scpi_power_state_t cpu_state, scpi_power_state_t cluster_state, |
| 124 | scpi_power_state_t css_state) |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 125 | { |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 126 | scpi_cmd_t *cmd; |
| 127 | uint32_t state = 0; |
| 128 | uint32_t *payload_addr; |
| 129 | |
Summer Qin | 93c812f | 2017-02-28 16:46:17 +0000 | [diff] [blame] | 130 | #if ARM_PLAT_MT |
| 131 | /* |
| 132 | * The current SCPI driver only caters for single-threaded platforms. |
| 133 | * Hence we ignore the thread ID (which is always 0) for such platforms. |
| 134 | */ |
| 135 | state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f; /* CPU ID */ |
| 136 | state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4; /* Cluster ID */ |
| 137 | #else |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 138 | state |= mpidr & 0x0f; /* CPU ID */ |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 139 | state |= (mpidr & 0xf00) >> 4; /* Cluster ID */ |
Summer Qin | 93c812f | 2017-02-28 16:46:17 +0000 | [diff] [blame] | 140 | #endif /* ARM_PLAT_MT */ |
| 141 | |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 142 | state |= cpu_state << 8; |
| 143 | state |= cluster_state << 12; |
| 144 | state |= css_state << 16; |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 145 | |
| 146 | scpi_secure_message_start(); |
| 147 | |
| 148 | /* Populate the command header */ |
| 149 | cmd = SCPI_CMD_HEADER_AP_TO_SCP; |
| 150 | cmd->id = SCPI_CMD_SET_CSS_POWER_STATE; |
| 151 | cmd->set = SCPI_SET_NORMAL; |
| 152 | cmd->sender = 0; |
| 153 | cmd->size = sizeof(state); |
| 154 | /* Populate the command payload */ |
| 155 | payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP; |
| 156 | *payload_addr = state; |
| 157 | scpi_secure_message_send(sizeof(state)); |
| 158 | /* |
| 159 | * SCP does not reply to this command in order to avoid MHU interrupts |
| 160 | * from the sender, which could interfere with its power state request. |
| 161 | */ |
| 162 | |
Jeenu Viswambharan | b1f6809 | 2016-08-04 12:44:52 +0100 | [diff] [blame] | 163 | scpi_secure_message_end(); |
| 164 | } |
| 165 | |
| 166 | /* |
| 167 | * Query and obtain CSS power state from SCP. |
| 168 | * |
| 169 | * In response to the query, SCP returns power states of all CPUs in all |
| 170 | * clusters of the system. The returned response is then filtered based on the |
| 171 | * supplied MPIDR. Power states of requested cluster and CPUs within are updated |
| 172 | * via. supplied non-NULL pointer arguments. |
| 173 | * |
| 174 | * Returns 0 on success, or -1 on errors. |
| 175 | */ |
| 176 | int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p, |
| 177 | unsigned int *cluster_state_p) |
| 178 | { |
| 179 | scpi_cmd_t *cmd; |
| 180 | scpi_cmd_t response; |
| 181 | int power_state, cpu, cluster, rc = -1; |
| 182 | |
| 183 | /* |
| 184 | * Extract CPU and cluster membership of the given MPIDR. SCPI caters |
| 185 | * for only up to 0xf clusters, and 8 CPUs per cluster |
| 186 | */ |
jagadeesh ujja | 64fa64b | 2017-05-11 16:32:18 +0530 | [diff] [blame] | 187 | #if ARM_PLAT_MT |
| 188 | /* |
| 189 | * The current SCPI driver only caters for single-threaded platforms. |
| 190 | * Hence we ignore the thread ID (which is always 0) for such platforms. |
| 191 | */ |
| 192 | cpu = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK; |
| 193 | cluster = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK; |
| 194 | #else |
Jeenu Viswambharan | b1f6809 | 2016-08-04 12:44:52 +0100 | [diff] [blame] | 195 | cpu = mpidr & MPIDR_AFFLVL_MASK; |
| 196 | cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK; |
jagadeesh ujja | 64fa64b | 2017-05-11 16:32:18 +0530 | [diff] [blame] | 197 | #endif /* ARM_PLAT_MT */ |
Jeenu Viswambharan | b1f6809 | 2016-08-04 12:44:52 +0100 | [diff] [blame] | 198 | if (cpu >= 8 || cluster >= 0xf) |
| 199 | return -1; |
| 200 | |
| 201 | scpi_secure_message_start(); |
| 202 | |
| 203 | /* Populate request headers */ |
Douglas Raillard | a8954fc | 2017-01-26 15:54:44 +0000 | [diff] [blame] | 204 | zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd)); |
| 205 | cmd = SCPI_CMD_HEADER_AP_TO_SCP; |
Jeenu Viswambharan | b1f6809 | 2016-08-04 12:44:52 +0100 | [diff] [blame] | 206 | cmd->id = SCPI_CMD_GET_CSS_POWER_STATE; |
| 207 | |
| 208 | /* |
| 209 | * Send message and wait for SCP's response |
| 210 | */ |
| 211 | scpi_secure_message_send(0); |
| 212 | scpi_secure_message_receive(&response); |
| 213 | |
| 214 | if (response.status != SCP_OK) |
| 215 | goto exit; |
| 216 | |
| 217 | /* Validate SCP response */ |
| 218 | if (!CHECK_RESPONSE(response, cluster)) |
| 219 | goto exit; |
| 220 | |
| 221 | /* Extract power states for required cluster */ |
| 222 | power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster); |
| 223 | if (CLUSTER_ID(power_state) != cluster) |
| 224 | goto exit; |
| 225 | |
| 226 | /* Update power state via. pointers */ |
| 227 | if (cluster_state_p) |
| 228 | *cluster_state_p = CLUSTER_POWER_STATE(power_state); |
| 229 | if (cpu_state_p) |
| 230 | *cpu_state_p = CPU_POWER_STATE(power_state); |
| 231 | rc = 0; |
| 232 | |
| 233 | exit: |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 234 | scpi_secure_message_end(); |
Jeenu Viswambharan | b1f6809 | 2016-08-04 12:44:52 +0100 | [diff] [blame] | 235 | return rc; |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 236 | } |
| 237 | |
| 238 | uint32_t scpi_sys_power_state(scpi_system_state_t system_state) |
| 239 | { |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 240 | scpi_cmd_t *cmd; |
| 241 | uint8_t *payload_addr; |
| 242 | scpi_cmd_t response; |
| 243 | |
| 244 | scpi_secure_message_start(); |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 245 | |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 246 | /* Populate the command header */ |
| 247 | cmd = SCPI_CMD_HEADER_AP_TO_SCP; |
| 248 | cmd->id = SCPI_CMD_SYS_POWER_STATE; |
| 249 | cmd->set = 0; |
| 250 | cmd->sender = 0; |
| 251 | cmd->size = sizeof(*payload_addr); |
| 252 | /* Populate the command payload */ |
| 253 | payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP; |
| 254 | *payload_addr = system_state & 0xff; |
| 255 | scpi_secure_message_send(sizeof(*payload_addr)); |
| 256 | |
| 257 | scpi_secure_message_receive(&response); |
| 258 | |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 259 | scpi_secure_message_end(); |
Sandrine Bailleux | 04b66d8 | 2015-03-18 14:52:53 +0000 | [diff] [blame] | 260 | |
| 261 | return response.status; |
Dan Handley | 9df4804 | 2015-03-19 18:58:55 +0000 | [diff] [blame] | 262 | } |