/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/css/css_mhu.h>
#include <drivers/arm/css/css_scpi.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <platform_def.h>

#define SCPI_SHARED_MEM_SCP_TO_AP	PLAT_CSS_SCP_COM_SHARED_MEM_BASE
#define SCPI_SHARED_MEM_AP_TO_SCP	(PLAT_CSS_SCP_COM_SHARED_MEM_BASE \
						+ 0x100)

/* Header and payload addresses for commands from AP to SCP */
#define SCPI_CMD_HEADER_AP_TO_SCP	\
	((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
#define SCPI_CMD_PAYLOAD_AP_TO_SCP	\
	((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))

/* Header and payload addresses for responses from SCP to AP */
#define SCPI_RES_HEADER_SCP_TO_AP \
	((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
#define SCPI_RES_PAYLOAD_SCP_TO_AP \
	((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))

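/*
 * As the macros above imply, each shared-memory mailbox holds a scpi_cmd_t
 * header at its base, immediately followed by the command or response
 * payload; the AP-to-SCP mailbox sits 0x100 bytes above the SCP-to-AP one.
 */
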
/* ID of the MHU slot used for the SCPI protocol */
#define SCPI_MHU_SLOT_ID		0

static void scpi_secure_message_start(void)
{
	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
}

static void scpi_secure_message_send(size_t payload_size)
{
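	/*
	 * Note: payload_size is not used by this implementation. The MHU
	 * doorbell conveys no length information; callers communicate the
	 * payload size to SCP through the 'size' field of the scpi_cmd_t
	 * header instead.
	 */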
	/*
	 * Ensure that any write to the SCPI payload area is seen by SCP before
	 * we write to the MHU register. If these 2 writes were reordered by
	 * the CPU then SCP would read stale payload data
	 */
	dmbst();

	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
}

static void scpi_secure_message_receive(scpi_cmd_t *cmd)
{
	uint32_t mhu_status;

	assert(cmd != NULL);

	mhu_status = mhu_secure_message_wait();

	/* Expect an SCPI message, reject any other protocol */
	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
			mhu_status);
		panic();
	}

	/*
	 * Ensure that any read to the SCPI payload area is done after reading
	 * the MHU register. If these 2 reads were reordered then the CPU would
	 * read invalid payload data
	 */
	dmbld();

	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
}

static void scpi_secure_message_end(void)
{
	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
}

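/*
 * Wait for SCP to announce readiness by sending the SCP_READY command, then
 * acknowledge it by sending back the same header with the status field set.
 * Returns 0 if the handshake completes as expected, -1 otherwise.
 */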
int scpi_wait_ready(void)
{
	scpi_cmd_t scpi_cmd;

	VERBOSE("Waiting for SCP_READY command...\n");

	/* Get a message from the SCP */
	scpi_secure_message_start();
	scpi_secure_message_receive(&scpi_cmd);
	scpi_secure_message_end();

	/* We are expecting 'SCP Ready'; produce the correct error if it's not */
	scpi_status_t status = SCP_OK;
	if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
		ERROR("Unexpected SCP command: expected command #%u, got command #%u\n",
		      SCPI_CMD_SCP_READY, scpi_cmd.id);
		status = SCP_E_SUPPORT;
	} else if (scpi_cmd.size != 0) {
		ERROR("SCP_READY command has incorrect size: expected 0, got %u\n",
		      scpi_cmd.size);
		status = SCP_E_SIZE;
	}

	VERBOSE("Sending response for SCP_READY command\n");

	/*
	 * Send our response back to SCP.
	 * We are using the same SCPI header, just update the status field.
	 */
	scpi_cmd.status = status;
	scpi_secure_message_start();
	memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
	scpi_secure_message_send(0);
	scpi_secure_message_end();

	return status == SCP_OK ? 0 : -1;
}

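/*
 * Pack the requested power states of the CPU, cluster and CSS identified by
 * 'mpidr' into a single word and send it to SCP with the SET_CSS_POWER_STATE
 * command. SCP sends no response to this command (see the note at the end of
 * the function).
 */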
void scpi_set_css_power_state(unsigned int mpidr,
		scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
		scpi_power_state_t css_state)
{
	scpi_cmd_t *cmd;
	uint32_t state = 0;
	uint32_t *payload_addr;

#if ARM_PLAT_MT
	/*
	 * The current SCPI driver only caters for single-threaded platforms.
	 * Hence we ignore the thread ID (which is always 0) for such platforms.
	 */
	state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f;	/* CPU ID */
	state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4;	/* Cluster ID */
#else
	state |= mpidr & 0x0f;	/* CPU ID */
	state |= (mpidr & 0xf00) >> 4;	/* Cluster ID */
#endif /* ARM_PLAT_MT */

	state |= cpu_state << 8;
	state |= cluster_state << 12;
	state |= css_state << 16;
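	/*
	 * Resulting layout of the packed 'state' word, as derived from the
	 * shifts above:
	 *   bits [3:0]   - CPU ID
	 *   bits [7:4]   - Cluster ID
	 *   bits [11:8]  - CPU power state
	 *   bits [15:12] - Cluster power state
	 *   bits [19:16] - CSS power state
	 */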

	scpi_secure_message_start();

	/* Populate the command header */
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_SET_CSS_POWER_STATE;
	cmd->set = SCPI_SET_NORMAL;
	cmd->sender = 0;
	cmd->size = sizeof(state);
	/* Populate the command payload */
	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
	*payload_addr = state;
	scpi_secure_message_send(sizeof(state));
	/*
	 * SCP does not reply to this command in order to avoid MHU interrupts
	 * from the sender, which could interfere with its power state request.
	 */

	scpi_secure_message_end();
}

/*
 * Query and obtain the CSS power state from SCP.
 *
 * In response to the query, SCP returns the power states of all CPUs in all
 * clusters of the system. The response is then filtered based on the supplied
 * MPIDR, and the power states of the requested cluster and the CPUs within it
 * are reported back via the supplied non-NULL pointer arguments.
 *
 * Returns 0 on success, or -1 on error.
 */
int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
		unsigned int *cluster_state_p)
{
	scpi_cmd_t *cmd;
	scpi_cmd_t response;
	int power_state, cpu, cluster, rc = -1;

	/*
	 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
	 * for only up to 0xf clusters, and 8 CPUs per cluster
	 */
#if ARM_PLAT_MT
	/*
	 * The current SCPI driver only caters for single-threaded platforms.
	 * Hence we ignore the thread ID (which is always 0) for such platforms.
	 */
	cpu = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
#else
	cpu = mpidr & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
#endif /* ARM_PLAT_MT */
	if (cpu >= 8 || cluster >= 0xf)
		return -1;

	scpi_secure_message_start();

	/* Populate request headers */
	zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_GET_CSS_POWER_STATE;

	/*
	 * Send message and wait for SCP's response
	 */
	scpi_secure_message_send(0);
	scpi_secure_message_receive(&response);

	if (response.status != SCP_OK)
		goto exit;

	/* Validate SCP response */
	if (!CHECK_RESPONSE(response, cluster))
		goto exit;

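	/*
	 * The response payload is treated as an array of 16-bit per-cluster
	 * records, indexed by cluster ID. Each record carries the ID of the
	 * cluster it describes together with the cluster and per-CPU power
	 * states, which the CLUSTER_ID(), CLUSTER_POWER_STATE() and
	 * CPU_POWER_STATE() macros from the SCPI driver header decode below.
	 */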
	/* Extract the power states for the required cluster */
	power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
	if (CLUSTER_ID(power_state) != cluster)
		goto exit;

	/* Update the power states via the pointers */
	if (cluster_state_p)
		*cluster_state_p = CLUSTER_POWER_STATE(power_state);
	if (cpu_state_p)
		*cpu_state_p = CPU_POWER_STATE(power_state);
	rc = 0;

exit:
	scpi_secure_message_end();
	return rc;
}

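/*
 * Request a system-wide power state transition from SCP. Callers are expected
 * to pass one of the scpi_system_state_t enumerators (e.g. shutdown or reboot)
 * declared in the SCPI driver header; only the low byte is sent, as the
 * SYS_POWER_STATE command carries a single-byte payload. Returns the status
 * field of SCP's response.
 */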
uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
{
	scpi_cmd_t *cmd;
	uint8_t *payload_addr;
	scpi_cmd_t response;

	scpi_secure_message_start();

	/* Populate the command header */
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_SYS_POWER_STATE;
	cmd->set = 0;
	cmd->sender = 0;
	cmd->size = sizeof(*payload_addr);
	/* Populate the command payload */
	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
	*payload_addr = system_state & 0xff;
	scpi_secure_message_send(sizeof(*payload_addr));

	scpi_secure_message_receive(&response);

	scpi_secure_message_end();

	return response.status;
}