blob: 7c5c5789a53dfaafdfe785b8fd2af9d0fac37a78 [file] [log] [blame]
Dan Handley9df48042015-03-19 18:58:55 +00001/*
Douglas Raillarda8954fc2017-01-26 15:54:44 +00002 * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
Dan Handley9df48042015-03-19 18:58:55 +00003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch_helpers.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000032#include <assert.h>
Dan Handley9df48042015-03-19 18:58:55 +000033#include <css_def.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000034#include <debug.h>
Dan Handley9df48042015-03-19 18:58:55 +000035#include <platform.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000036#include <string.h>
Douglas Raillarda8954fc2017-01-26 15:54:44 +000037#include <utils.h>
Dan Handley9df48042015-03-19 18:58:55 +000038#include "css_mhu.h"
39#include "css_scpi.h"
40
/*
 * Layout of the SCP<->AP shared mailbox memory: the SCP-to-AP area sits at
 * the base of the platform's shared region, and the AP-to-SCP area starts
 * 0x100 bytes above it.
 */
#define SCPI_SHARED_MEM_SCP_TO_AP	PLAT_CSS_SCP_COM_SHARED_MEM_BASE
#define SCPI_SHARED_MEM_AP_TO_SCP	(PLAT_CSS_SCP_COM_SHARED_MEM_BASE \
								 + 0x100)

/* Header and payload addresses for commands from AP to SCP */
#define SCPI_CMD_HEADER_AP_TO_SCP		\
	((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
#define SCPI_CMD_PAYLOAD_AP_TO_SCP		\
	((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))

/* Header and payload addresses for responses from SCP to AP */
#define SCPI_RES_HEADER_SCP_TO_AP \
	((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
#define SCPI_RES_PAYLOAD_SCP_TO_AP \
	((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))

/* ID of the MHU slot used for the SCPI protocol */
#define SCPI_MHU_SLOT_ID		0
Dan Handley9df48042015-03-19 18:58:55 +000059
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000060static void scpi_secure_message_start(void)
Dan Handley9df48042015-03-19 18:58:55 +000061{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000062 mhu_secure_message_start(SCPI_MHU_SLOT_ID);
Dan Handley9df48042015-03-19 18:58:55 +000063}
64
/*
 * Notify the SCP, through the MHU, that a command has been placed in the
 * AP-to-SCP shared memory area.
 *
 * NOTE(review): payload_size is not referenced in this body — presumably
 * kept for protocol symmetry with callers; confirm before removing.
 */
static void scpi_secure_message_send(size_t payload_size)
{
	/*
	 * Ensure that any write to the SCPI payload area is seen by SCP before
	 * we write to the MHU register. If these 2 writes were reordered by
	 * the CPU then SCP would read stale payload data
	 */
	dmbst();

	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
}
76
/*
 * Wait for a message from the SCP and copy its SCPI header into *cmd.
 * Panics if the MHU status does not indicate the SCPI slot, i.e. if a
 * different (unsupported) protocol signalled the mailbox.
 */
static void scpi_secure_message_receive(scpi_cmd_t *cmd)
{
	uint32_t mhu_status;

	assert(cmd != NULL);

	/* Block until the SCP raises the MHU interrupt/status */
	mhu_status = mhu_secure_message_wait();

	/* Expect an SCPI message, reject any other protocol */
	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
			mhu_status);
		panic();
	}

	/*
	 * Ensure that any read to the SCPI payload area is done after reading
	 * the MHU register. If these 2 reads were reordered then the CPU would
	 * read invalid payload data
	 */
	dmbld();

	/* Copy only the fixed-size SCPI header; payload stays in shared mem */
	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
}
101
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000102static void scpi_secure_message_end(void)
Dan Handley9df48042015-03-19 18:58:55 +0000103{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000104 mhu_secure_message_end(SCPI_MHU_SLOT_ID);
Dan Handley9df48042015-03-19 18:58:55 +0000105}
106
107int scpi_wait_ready(void)
108{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000109 scpi_cmd_t scpi_cmd;
110
111 VERBOSE("Waiting for SCP_READY command...\n");
112
Dan Handley9df48042015-03-19 18:58:55 +0000113 /* Get a message from the SCP */
114 scpi_secure_message_start();
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000115 scpi_secure_message_receive(&scpi_cmd);
Dan Handley9df48042015-03-19 18:58:55 +0000116 scpi_secure_message_end();
117
118 /* We are expecting 'SCP Ready', produce correct error if it's not */
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000119 scpi_status_t status = SCP_OK;
120 if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
121 ERROR("Unexpected SCP command: expected command #%u, got command #%u\n",
122 SCPI_CMD_SCP_READY, scpi_cmd.id);
123 status = SCP_E_SUPPORT;
124 } else if (scpi_cmd.size != 0) {
125 ERROR("SCP_READY command has incorrect size: expected 0, got %u\n",
126 scpi_cmd.size);
127 status = SCP_E_SIZE;
128 }
Dan Handley9df48042015-03-19 18:58:55 +0000129
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000130 VERBOSE("Sending response for SCP_READY command\n");
131
132 /*
133 * Send our response back to SCP.
134 * We are using the same SCPI header, just update the status field.
135 */
136 scpi_cmd.status = status;
137 scpi_secure_message_start();
138 memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
139 scpi_secure_message_send(0);
140 scpi_secure_message_end();
Dan Handley9df48042015-03-19 18:58:55 +0000141
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000142 return status == SCP_OK ? 0 : -1;
Dan Handley9df48042015-03-19 18:58:55 +0000143}
144
Soby Mathew200fffd2016-10-21 11:34:59 +0100145void scpi_set_css_power_state(unsigned int mpidr,
146 scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
147 scpi_power_state_t css_state)
Dan Handley9df48042015-03-19 18:58:55 +0000148{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000149 scpi_cmd_t *cmd;
150 uint32_t state = 0;
151 uint32_t *payload_addr;
152
Summer Qin93c812f2017-02-28 16:46:17 +0000153#if ARM_PLAT_MT
154 /*
155 * The current SCPI driver only caters for single-threaded platforms.
156 * Hence we ignore the thread ID (which is always 0) for such platforms.
157 */
158 state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f; /* CPU ID */
159 state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4; /* Cluster ID */
160#else
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000161 state |= mpidr & 0x0f; /* CPU ID */
Dan Handley9df48042015-03-19 18:58:55 +0000162 state |= (mpidr & 0xf00) >> 4; /* Cluster ID */
Summer Qin93c812f2017-02-28 16:46:17 +0000163#endif /* ARM_PLAT_MT */
164
Dan Handley9df48042015-03-19 18:58:55 +0000165 state |= cpu_state << 8;
166 state |= cluster_state << 12;
167 state |= css_state << 16;
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000168
169 scpi_secure_message_start();
170
171 /* Populate the command header */
172 cmd = SCPI_CMD_HEADER_AP_TO_SCP;
173 cmd->id = SCPI_CMD_SET_CSS_POWER_STATE;
174 cmd->set = SCPI_SET_NORMAL;
175 cmd->sender = 0;
176 cmd->size = sizeof(state);
177 /* Populate the command payload */
178 payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
179 *payload_addr = state;
180 scpi_secure_message_send(sizeof(state));
181 /*
182 * SCP does not reply to this command in order to avoid MHU interrupts
183 * from the sender, which could interfere with its power state request.
184 */
185
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +0100186 scpi_secure_message_end();
187}
188
/*
 * Query and obtain CSS power state from SCP.
 *
 * In response to the query, SCP returns power states of all CPUs in all
 * clusters of the system. The returned response is then filtered based on the
 * supplied MPIDR. Power states of requested cluster and CPUs within are updated
 * via. supplied non-NULL pointer arguments.
 *
 * Returns 0 on success, or -1 on errors.
 */
int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
		unsigned int *cluster_state_p)
{
	scpi_cmd_t *cmd;
	scpi_cmd_t response;
	int power_state, cpu, cluster, rc = -1;

	/*
	 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
	 * for only up to 0xf clusters, and 8 CPUs per cluster
	 */
	cpu = mpidr & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
	if (cpu >= 8 || cluster >= 0xf)
		return -1;

	scpi_secure_message_start();

	/* Populate request headers */
	zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_GET_CSS_POWER_STATE;

	/*
	 * Send message and wait for SCP's response
	 */
	scpi_secure_message_send(0);
	scpi_secure_message_receive(&response);

	/* Any failure reported by the SCP aborts the query */
	if (response.status != SCP_OK)
		goto exit;

	/* Validate SCP response */
	if (!CHECK_RESPONSE(response, cluster))
		goto exit;

	/*
	 * Extract power states for required cluster: the payload is an array
	 * of 16-bit entries indexed by cluster, each carrying the cluster ID
	 * plus its cluster/CPU power states.
	 */
	power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
	if (CLUSTER_ID(power_state) != cluster)
		goto exit;

	/* Update power state via. pointers */
	if (cluster_state_p)
		*cluster_state_p = CLUSTER_POWER_STATE(power_state);
	if (cpu_state_p)
		*cpu_state_p = CPU_POWER_STATE(power_state);
	rc = 0;

exit:
	/* Single exit: always release the MHU slot before returning */
	scpi_secure_message_end();
	return rc;
}
251
252uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
253{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000254 scpi_cmd_t *cmd;
255 uint8_t *payload_addr;
256 scpi_cmd_t response;
257
258 scpi_secure_message_start();
Dan Handley9df48042015-03-19 18:58:55 +0000259
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000260 /* Populate the command header */
261 cmd = SCPI_CMD_HEADER_AP_TO_SCP;
262 cmd->id = SCPI_CMD_SYS_POWER_STATE;
263 cmd->set = 0;
264 cmd->sender = 0;
265 cmd->size = sizeof(*payload_addr);
266 /* Populate the command payload */
267 payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
268 *payload_addr = system_state & 0xff;
269 scpi_secure_message_send(sizeof(*payload_addr));
270
271 scpi_secure_message_receive(&response);
272
Dan Handley9df48042015-03-19 18:58:55 +0000273 scpi_secure_message_end();
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000274
275 return response.status;
Dan Handley9df48042015-03-19 18:58:55 +0000276}