blob: 65ae978f867781208d0f40edeac3aba0eea5838d [file] [log] [blame]
/*
 * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
30
31#include <arch_helpers.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000032#include <assert.h>
Dan Handley9df48042015-03-19 18:58:55 +000033#include <css_def.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000034#include <debug.h>
Dan Handley9df48042015-03-19 18:58:55 +000035#include <platform.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000036#include <string.h>
Douglas Raillarda8954fc2017-01-26 15:54:44 +000037#include <utils.h>
Dan Handley9df48042015-03-19 18:58:55 +000038#include "css_mhu.h"
39#include "css_scpi.h"
40
Vikram Kanigiri72084192016-02-08 16:29:30 +000041#define SCPI_SHARED_MEM_SCP_TO_AP PLAT_CSS_SCP_COM_SHARED_MEM_BASE
42#define SCPI_SHARED_MEM_AP_TO_SCP (PLAT_CSS_SCP_COM_SHARED_MEM_BASE \
43 + 0x100)
Dan Handley9df48042015-03-19 18:58:55 +000044
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +010045/* Header and payload addresses for commands from AP to SCP */
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000046#define SCPI_CMD_HEADER_AP_TO_SCP \
47 ((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
48#define SCPI_CMD_PAYLOAD_AP_TO_SCP \
49 ((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))
Dan Handley9df48042015-03-19 18:58:55 +000050
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +010051/* Header and payload addresses for responses from SCP to AP */
52#define SCPI_RES_HEADER_SCP_TO_AP \
53 ((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
54#define SCPI_RES_PAYLOAD_SCP_TO_AP \
55 ((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))
56
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000057/* ID of the MHU slot used for the SCPI protocol */
58#define SCPI_MHU_SLOT_ID 0
Dan Handley9df48042015-03-19 18:58:55 +000059
/*
 * Begin an SCPI transaction: acquire the secure MHU slot reserved for the
 * SCPI protocol before touching the shared-memory command area.
 */
static void scpi_secure_message_start(void)
{
	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
}
64
/*
 * Signal SCP that a command has been placed in the AP-to-SCP shared memory
 * area.  The payload_size argument is currently unused here (the size is
 * carried in the SCPI header written by the caller); it is kept for
 * interface symmetry with the send path.
 */
static void scpi_secure_message_send(size_t payload_size)
{
	/*
	 * Ensure that any write to the SCPI payload area is seen by SCP before
	 * we write to the MHU register. If these 2 writes were reordered by
	 * the CPU then SCP would read stale payload data
	 */
	dmbst();

	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
}
76
/*
 * Wait for an incoming message from SCP and copy its SCPI header into *cmd.
 * Panics if the MHU indicates a protocol other than SCPI.  Only the
 * fixed-size header is copied; any payload remains in the SCP-to-AP shared
 * memory area for the caller to read.
 */
static void scpi_secure_message_receive(scpi_cmd_t *cmd)
{
	uint32_t mhu_status;

	assert(cmd != NULL);

	/* Block until SCP raises the secure MHU signal */
	mhu_status = mhu_secure_message_wait();

	/* Expect an SCPI message, reject any other protocol */
	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
			mhu_status);
		panic();
	}

	/*
	 * Ensure that any read to the SCPI payload area is done after reading
	 * the MHU register. If these 2 reads were reordered then the CPU would
	 * read invalid payload data
	 */
	dmbld();

	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
}
101
/*
 * End an SCPI transaction: release the secure MHU slot acquired by
 * scpi_secure_message_start().
 */
static void scpi_secure_message_end(void)
{
	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
}
106
107int scpi_wait_ready(void)
108{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000109 scpi_cmd_t scpi_cmd;
110
111 VERBOSE("Waiting for SCP_READY command...\n");
112
Dan Handley9df48042015-03-19 18:58:55 +0000113 /* Get a message from the SCP */
114 scpi_secure_message_start();
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000115 scpi_secure_message_receive(&scpi_cmd);
Dan Handley9df48042015-03-19 18:58:55 +0000116 scpi_secure_message_end();
117
118 /* We are expecting 'SCP Ready', produce correct error if it's not */
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000119 scpi_status_t status = SCP_OK;
120 if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
121 ERROR("Unexpected SCP command: expected command #%u, got command #%u\n",
122 SCPI_CMD_SCP_READY, scpi_cmd.id);
123 status = SCP_E_SUPPORT;
124 } else if (scpi_cmd.size != 0) {
125 ERROR("SCP_READY command has incorrect size: expected 0, got %u\n",
126 scpi_cmd.size);
127 status = SCP_E_SIZE;
128 }
Dan Handley9df48042015-03-19 18:58:55 +0000129
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000130 VERBOSE("Sending response for SCP_READY command\n");
131
132 /*
133 * Send our response back to SCP.
134 * We are using the same SCPI header, just update the status field.
135 */
136 scpi_cmd.status = status;
137 scpi_secure_message_start();
138 memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
139 scpi_secure_message_send(0);
140 scpi_secure_message_end();
Dan Handley9df48042015-03-19 18:58:55 +0000141
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000142 return status == SCP_OK ? 0 : -1;
Dan Handley9df48042015-03-19 18:58:55 +0000143}
144
Soby Mathew200fffd2016-10-21 11:34:59 +0100145void scpi_set_css_power_state(unsigned int mpidr,
146 scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
147 scpi_power_state_t css_state)
Dan Handley9df48042015-03-19 18:58:55 +0000148{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000149 scpi_cmd_t *cmd;
150 uint32_t state = 0;
151 uint32_t *payload_addr;
152
153 state |= mpidr & 0x0f; /* CPU ID */
Dan Handley9df48042015-03-19 18:58:55 +0000154 state |= (mpidr & 0xf00) >> 4; /* Cluster ID */
155 state |= cpu_state << 8;
156 state |= cluster_state << 12;
157 state |= css_state << 16;
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000158
159 scpi_secure_message_start();
160
161 /* Populate the command header */
162 cmd = SCPI_CMD_HEADER_AP_TO_SCP;
163 cmd->id = SCPI_CMD_SET_CSS_POWER_STATE;
164 cmd->set = SCPI_SET_NORMAL;
165 cmd->sender = 0;
166 cmd->size = sizeof(state);
167 /* Populate the command payload */
168 payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
169 *payload_addr = state;
170 scpi_secure_message_send(sizeof(state));
171 /*
172 * SCP does not reply to this command in order to avoid MHU interrupts
173 * from the sender, which could interfere with its power state request.
174 */
175
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +0100176 scpi_secure_message_end();
177}
178
179/*
180 * Query and obtain CSS power state from SCP.
181 *
182 * In response to the query, SCP returns power states of all CPUs in all
183 * clusters of the system. The returned response is then filtered based on the
184 * supplied MPIDR. Power states of requested cluster and CPUs within are updated
185 * via. supplied non-NULL pointer arguments.
186 *
187 * Returns 0 on success, or -1 on errors.
188 */
189int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
190 unsigned int *cluster_state_p)
191{
192 scpi_cmd_t *cmd;
193 scpi_cmd_t response;
194 int power_state, cpu, cluster, rc = -1;
195
196 /*
197 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
198 * for only up to 0xf clusters, and 8 CPUs per cluster
199 */
200 cpu = mpidr & MPIDR_AFFLVL_MASK;
201 cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
202 if (cpu >= 8 || cluster >= 0xf)
203 return -1;
204
205 scpi_secure_message_start();
206
207 /* Populate request headers */
Douglas Raillarda8954fc2017-01-26 15:54:44 +0000208 zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
209 cmd = SCPI_CMD_HEADER_AP_TO_SCP;
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +0100210 cmd->id = SCPI_CMD_GET_CSS_POWER_STATE;
211
212 /*
213 * Send message and wait for SCP's response
214 */
215 scpi_secure_message_send(0);
216 scpi_secure_message_receive(&response);
217
218 if (response.status != SCP_OK)
219 goto exit;
220
221 /* Validate SCP response */
222 if (!CHECK_RESPONSE(response, cluster))
223 goto exit;
224
225 /* Extract power states for required cluster */
226 power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
227 if (CLUSTER_ID(power_state) != cluster)
228 goto exit;
229
230 /* Update power state via. pointers */
231 if (cluster_state_p)
232 *cluster_state_p = CLUSTER_POWER_STATE(power_state);
233 if (cpu_state_p)
234 *cpu_state_p = CPU_POWER_STATE(power_state);
235 rc = 0;
236
237exit:
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000238 scpi_secure_message_end();
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +0100239 return rc;
Dan Handley9df48042015-03-19 18:58:55 +0000240}
241
242uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
243{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000244 scpi_cmd_t *cmd;
245 uint8_t *payload_addr;
246 scpi_cmd_t response;
247
248 scpi_secure_message_start();
Dan Handley9df48042015-03-19 18:58:55 +0000249
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000250 /* Populate the command header */
251 cmd = SCPI_CMD_HEADER_AP_TO_SCP;
252 cmd->id = SCPI_CMD_SYS_POWER_STATE;
253 cmd->set = 0;
254 cmd->sender = 0;
255 cmd->size = sizeof(*payload_addr);
256 /* Populate the command payload */
257 payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
258 *payload_addr = system_state & 0xff;
259 scpi_secure_message_send(sizeof(*payload_addr));
260
261 scpi_secure_message_receive(&response);
262
Dan Handley9df48042015-03-19 18:58:55 +0000263 scpi_secure_message_end();
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000264
265 return response.status;
Dan Handley9df48042015-03-19 18:58:55 +0000266}