blob: d64bfa2679af50dba8d382a7c74420cb30c47ad3 [file] [log] [blame]
Dan Handley9df48042015-03-19 18:58:55 +00001/*
Samarth Parikh59cfa132017-11-23 14:23:21 +05302 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
Dan Handley9df48042015-03-19 18:58:55 +00003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Dan Handley9df48042015-03-19 18:58:55 +00005 */
6
Sandrine Bailleux04b66d82015-03-18 14:52:53 +00007#include <assert.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +00008#include <string.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +00009
10#include <arch_helpers.h>
11#include <common/debug.h>
12#include <lib/utils.h>
13#include <plat/common/platform.h>
Antonio Nino Diaza320ecd2019-01-15 14:19:50 +000014#include <platform_def.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000015
Samarth Parikh59cfa132017-11-23 14:23:21 +053016#include "../mhu/css_mhu.h"
Dan Handley9df48042015-03-19 18:58:55 +000017#include "css_scpi.h"
18
Vikram Kanigiri72084192016-02-08 16:29:30 +000019#define SCPI_SHARED_MEM_SCP_TO_AP PLAT_CSS_SCP_COM_SHARED_MEM_BASE
20#define SCPI_SHARED_MEM_AP_TO_SCP (PLAT_CSS_SCP_COM_SHARED_MEM_BASE \
21 + 0x100)
Dan Handley9df48042015-03-19 18:58:55 +000022
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +010023/* Header and payload addresses for commands from AP to SCP */
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000024#define SCPI_CMD_HEADER_AP_TO_SCP \
25 ((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
26#define SCPI_CMD_PAYLOAD_AP_TO_SCP \
27 ((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))
Dan Handley9df48042015-03-19 18:58:55 +000028
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +010029/* Header and payload addresses for responses from SCP to AP */
30#define SCPI_RES_HEADER_SCP_TO_AP \
31 ((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
32#define SCPI_RES_PAYLOAD_SCP_TO_AP \
33 ((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))
34
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000035/* ID of the MHU slot used for the SCPI protocol */
36#define SCPI_MHU_SLOT_ID 0
Dan Handley9df48042015-03-19 18:58:55 +000037
/*
 * Begin a secure SCPI exchange: acquire the MHU for the SCPI slot before
 * touching the shared-memory command area.
 */
static void scpi_secure_message_start(void)
{
	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
}
42
/*
 * Ring the SCP's doorbell on the SCPI MHU slot after a command (header and
 * payload) has been written to the AP-to-SCP shared memory area.
 *
 * Note: payload_size is currently unused by this function; the payload must
 * already be in place before the call.
 */
static void scpi_secure_message_send(size_t payload_size)
{
	/*
	 * Ensure that any write to the SCPI payload area is seen by SCP before
	 * we write to the MHU register. If these 2 writes were reordered by
	 * the CPU then SCP would read stale payload data
	 */
	dmbst();

	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
}
54
/*
 * Wait for an incoming message from the SCP and copy its SCPI header into
 * *cmd. Panics if the MHU indicates a protocol other than SCPI (i.e. a slot
 * bit other than SCPI_MHU_SLOT_ID is set).
 */
static void scpi_secure_message_receive(scpi_cmd_t *cmd)
{
	uint32_t mhu_status;

	assert(cmd != NULL);

	/* Block until the SCP signals a message via the MHU */
	mhu_status = mhu_secure_message_wait();

	/* Expect an SCPI message, reject any other protocol */
	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
			mhu_status);
		panic();
	}

	/*
	 * Ensure that any read to the SCPI payload area is done after reading
	 * the MHU register. If these 2 reads were reordered then the CPU would
	 * read invalid payload data
	 */
	dmbld();

	/* Copy only the fixed-size header; payload stays in shared memory */
	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
}
79
/*
 * Finish a secure SCPI exchange: release the MHU slot acquired by
 * scpi_secure_message_start().
 */
static void scpi_secure_message_end(void)
{
	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
}
84
85int scpi_wait_ready(void)
86{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000087 scpi_cmd_t scpi_cmd;
88
89 VERBOSE("Waiting for SCP_READY command...\n");
90
Dan Handley9df48042015-03-19 18:58:55 +000091 /* Get a message from the SCP */
92 scpi_secure_message_start();
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000093 scpi_secure_message_receive(&scpi_cmd);
Dan Handley9df48042015-03-19 18:58:55 +000094 scpi_secure_message_end();
95
96 /* We are expecting 'SCP Ready', produce correct error if it's not */
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000097 scpi_status_t status = SCP_OK;
98 if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
99 ERROR("Unexpected SCP command: expected command #%u, got command #%u\n",
100 SCPI_CMD_SCP_READY, scpi_cmd.id);
101 status = SCP_E_SUPPORT;
102 } else if (scpi_cmd.size != 0) {
103 ERROR("SCP_READY command has incorrect size: expected 0, got %u\n",
104 scpi_cmd.size);
105 status = SCP_E_SIZE;
106 }
Dan Handley9df48042015-03-19 18:58:55 +0000107
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000108 VERBOSE("Sending response for SCP_READY command\n");
109
110 /*
111 * Send our response back to SCP.
112 * We are using the same SCPI header, just update the status field.
113 */
114 scpi_cmd.status = status;
115 scpi_secure_message_start();
116 memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
117 scpi_secure_message_send(0);
118 scpi_secure_message_end();
Dan Handley9df48042015-03-19 18:58:55 +0000119
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000120 return status == SCP_OK ? 0 : -1;
Dan Handley9df48042015-03-19 18:58:55 +0000121}
122
/*
 * Ask the SCP to program the power state of a CPU, its cluster and the CSS
 * (system) level via the SCPI SET_CSS_POWER_STATE command.
 *
 * mpidr identifies the target CPU; cpu_state, cluster_state and css_state
 * are the requested SCPI power states for the respective topology levels.
 *
 * No response is awaited: the SCP deliberately does not reply to this
 * command (see comment at the end of the function).
 */
void scpi_set_css_power_state(unsigned int mpidr,
		scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
		scpi_power_state_t css_state)
{
	scpi_cmd_t *cmd;
	uint32_t state = 0;
	uint32_t *payload_addr;

#if ARM_PLAT_MT
	/*
	 * The current SCPI driver only caters for single-threaded platforms.
	 * Hence we ignore the thread ID (which is always 0) for such platforms.
	 */
	state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f;	/* CPU ID */
	state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4;	/* Cluster ID */
#else
	state |= mpidr & 0x0f;		/* CPU ID */
	state |= (mpidr & 0xf00) >> 4;	/* Cluster ID */
#endif /* ARM_PLAT_MT */

	/* Pack the requested power state for each topology level */
	state |= cpu_state << 8;
	state |= cluster_state << 12;
	state |= css_state << 16;

	scpi_secure_message_start();

	/* Populate the command header */
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_SET_CSS_POWER_STATE;
	cmd->set = SCPI_SET_NORMAL;
	cmd->sender = 0;
	cmd->size = sizeof(state);
	/* Populate the command payload */
	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
	*payload_addr = state;
	scpi_secure_message_send(sizeof(state));
	/*
	 * SCP does not reply to this command in order to avoid MHU interrupts
	 * from the sender, which could interfere with its power state request.
	 */

	scpi_secure_message_end();
}
166
/*
 * Query and obtain CSS power state from SCP.
 *
 * In response to the query, SCP returns power states of all CPUs in all
 * clusters of the system. The returned response is then filtered based on the
 * supplied MPIDR. Power states of requested cluster and CPUs within are
 * updated via the supplied non-NULL pointer arguments.
 *
 * Returns 0 on success, or -1 on errors.
 */
int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
		unsigned int *cluster_state_p)
{
	scpi_cmd_t *cmd;
	scpi_cmd_t response;
	int power_state, cpu, cluster, rc = -1;

	/*
	 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
	 * for only up to 0xf clusters, and 8 CPUs per cluster
	 */
#if ARM_PLAT_MT
	/*
	 * The current SCPI driver only caters for single-threaded platforms.
	 * Hence we ignore the thread ID (which is always 0) for such platforms.
	 */
	cpu = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
#else
	cpu = mpidr & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
#endif /* ARM_PLAT_MT */
	/* Reject MPIDRs outside the range the SCPI protocol can describe */
	if (cpu >= 8 || cluster >= 0xf)
		return -1;

	scpi_secure_message_start();

	/* Populate request headers; zero-fill so unused fields are 0 */
	zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_GET_CSS_POWER_STATE;

	/*
	 * Send message and wait for SCP's response
	 */
	scpi_secure_message_send(0);
	scpi_secure_message_receive(&response);

	if (response.status != SCP_OK)
		goto exit;

	/* Validate SCP response */
	if (!CHECK_RESPONSE(response, cluster))
		goto exit;

	/*
	 * Extract power states for required cluster. The payload is indexed
	 * as an array of 16-bit per-cluster descriptors; the embedded cluster
	 * ID is cross-checked against the one we asked about.
	 */
	power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
	if (CLUSTER_ID(power_state) != cluster)
		goto exit;

	/* Update power state via pointers (each may be NULL if not wanted) */
	if (cluster_state_p)
		*cluster_state_p = CLUSTER_POWER_STATE(power_state);
	if (cpu_state_p)
		*cpu_state_p = CPU_POWER_STATE(power_state);
	rc = 0;

exit:
	/* Always release the MHU slot, success or failure */
	scpi_secure_message_end();
	return rc;
}
238
239uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
240{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000241 scpi_cmd_t *cmd;
242 uint8_t *payload_addr;
243 scpi_cmd_t response;
244
245 scpi_secure_message_start();
Dan Handley9df48042015-03-19 18:58:55 +0000246
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000247 /* Populate the command header */
248 cmd = SCPI_CMD_HEADER_AP_TO_SCP;
249 cmd->id = SCPI_CMD_SYS_POWER_STATE;
250 cmd->set = 0;
251 cmd->sender = 0;
252 cmd->size = sizeof(*payload_addr);
253 /* Populate the command payload */
254 payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
255 *payload_addr = system_state & 0xff;
256 scpi_secure_message_send(sizeof(*payload_addr));
257
258 scpi_secure_message_receive(&response);
259
Dan Handley9df48042015-03-19 18:58:55 +0000260 scpi_secure_message_end();
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000261
262 return response.status;
Dan Handley9df48042015-03-19 18:58:55 +0000263}