blob: f419abd03ac66e68eb18dfec98f096afbf8e49c6 [file] [log] [blame]
Dan Handley9df48042015-03-19 18:58:55 +00001/*
Vikram Kanigiri72084192016-02-08 16:29:30 +00002 * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
Dan Handley9df48042015-03-19 18:58:55 +00003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch_helpers.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000032#include <assert.h>
Dan Handley9df48042015-03-19 18:58:55 +000033#include <css_def.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000034#include <debug.h>
Dan Handley9df48042015-03-19 18:58:55 +000035#include <platform.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000036#include <string.h>
Dan Handley9df48042015-03-19 18:58:55 +000037#include "css_mhu.h"
38#include "css_scpi.h"
39
/*
 * Layout of the shared memory region used to exchange SCPI messages with
 * the SCP: the SCP-to-AP area starts at the base of the region, and the
 * AP-to-SCP area starts 0x100 bytes above it.
 */
#define SCPI_SHARED_MEM_SCP_TO_AP	PLAT_CSS_SCP_COM_SHARED_MEM_BASE
#define SCPI_SHARED_MEM_AP_TO_SCP	(PLAT_CSS_SCP_COM_SHARED_MEM_BASE \
						+ 0x100)

/* Header and payload addresses for commands from AP to SCP */
#define SCPI_CMD_HEADER_AP_TO_SCP		\
	((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
#define SCPI_CMD_PAYLOAD_AP_TO_SCP		\
	((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))

/* Header and payload addresses for responses from SCP to AP */
#define SCPI_RES_HEADER_SCP_TO_AP \
	((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
#define SCPI_RES_PAYLOAD_SCP_TO_AP \
	((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))

/* ID of the MHU slot used for the SCPI protocol */
#define SCPI_MHU_SLOT_ID		0
Dan Handley9df48042015-03-19 18:58:55 +000058
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000059static void scpi_secure_message_start(void)
Dan Handley9df48042015-03-19 18:58:55 +000060{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000061 mhu_secure_message_start(SCPI_MHU_SLOT_ID);
Dan Handley9df48042015-03-19 18:58:55 +000062}
63
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000064static void scpi_secure_message_send(size_t payload_size)
Dan Handley9df48042015-03-19 18:58:55 +000065{
Soby Mathew200fffd2016-10-21 11:34:59 +010066 /*
67 * Ensure that any write to the SCPI payload area is seen by SCP before
Juan Castillo2e86cb12016-01-13 15:01:09 +000068 * we write to the MHU register. If these 2 writes were reordered by
Soby Mathew200fffd2016-10-21 11:34:59 +010069 * the CPU then SCP would read stale payload data
70 */
Juan Castillo2e86cb12016-01-13 15:01:09 +000071 dmbst();
Dan Handley9df48042015-03-19 18:58:55 +000072
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000073 mhu_secure_message_send(SCPI_MHU_SLOT_ID);
Dan Handley9df48042015-03-19 18:58:55 +000074}
75
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000076static void scpi_secure_message_receive(scpi_cmd_t *cmd)
Dan Handley9df48042015-03-19 18:58:55 +000077{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000078 uint32_t mhu_status;
Dan Handley9df48042015-03-19 18:58:55 +000079
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000080 assert(cmd != NULL);
Dan Handley9df48042015-03-19 18:58:55 +000081
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000082 mhu_status = mhu_secure_message_wait();
83
84 /* Expect an SCPI message, reject any other protocol */
85 if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
86 ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
87 mhu_status);
88 panic();
89 }
Dan Handley9df48042015-03-19 18:58:55 +000090
Soby Mathew200fffd2016-10-21 11:34:59 +010091 /*
92 * Ensure that any read to the SCPI payload area is done after reading
Juan Castillo2e86cb12016-01-13 15:01:09 +000093 * the MHU register. If these 2 reads were reordered then the CPU would
Soby Mathew200fffd2016-10-21 11:34:59 +010094 * read invalid payload data
95 */
Juan Castillo2e86cb12016-01-13 15:01:09 +000096 dmbld();
Dan Handley9df48042015-03-19 18:58:55 +000097
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000098 memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
Dan Handley9df48042015-03-19 18:58:55 +000099}
100
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000101static void scpi_secure_message_end(void)
Dan Handley9df48042015-03-19 18:58:55 +0000102{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000103 mhu_secure_message_end(SCPI_MHU_SLOT_ID);
Dan Handley9df48042015-03-19 18:58:55 +0000104}
105
106int scpi_wait_ready(void)
107{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000108 scpi_cmd_t scpi_cmd;
109
110 VERBOSE("Waiting for SCP_READY command...\n");
111
Dan Handley9df48042015-03-19 18:58:55 +0000112 /* Get a message from the SCP */
113 scpi_secure_message_start();
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000114 scpi_secure_message_receive(&scpi_cmd);
Dan Handley9df48042015-03-19 18:58:55 +0000115 scpi_secure_message_end();
116
117 /* We are expecting 'SCP Ready', produce correct error if it's not */
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000118 scpi_status_t status = SCP_OK;
119 if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
120 ERROR("Unexpected SCP command: expected command #%u, got command #%u\n",
121 SCPI_CMD_SCP_READY, scpi_cmd.id);
122 status = SCP_E_SUPPORT;
123 } else if (scpi_cmd.size != 0) {
124 ERROR("SCP_READY command has incorrect size: expected 0, got %u\n",
125 scpi_cmd.size);
126 status = SCP_E_SIZE;
127 }
Dan Handley9df48042015-03-19 18:58:55 +0000128
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000129 VERBOSE("Sending response for SCP_READY command\n");
130
131 /*
132 * Send our response back to SCP.
133 * We are using the same SCPI header, just update the status field.
134 */
135 scpi_cmd.status = status;
136 scpi_secure_message_start();
137 memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
138 scpi_secure_message_send(0);
139 scpi_secure_message_end();
Dan Handley9df48042015-03-19 18:58:55 +0000140
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000141 return status == SCP_OK ? 0 : -1;
Dan Handley9df48042015-03-19 18:58:55 +0000142}
143
Soby Mathew200fffd2016-10-21 11:34:59 +0100144void scpi_set_css_power_state(unsigned int mpidr,
145 scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
146 scpi_power_state_t css_state)
Dan Handley9df48042015-03-19 18:58:55 +0000147{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000148 scpi_cmd_t *cmd;
149 uint32_t state = 0;
150 uint32_t *payload_addr;
151
152 state |= mpidr & 0x0f; /* CPU ID */
Dan Handley9df48042015-03-19 18:58:55 +0000153 state |= (mpidr & 0xf00) >> 4; /* Cluster ID */
154 state |= cpu_state << 8;
155 state |= cluster_state << 12;
156 state |= css_state << 16;
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000157
158 scpi_secure_message_start();
159
160 /* Populate the command header */
161 cmd = SCPI_CMD_HEADER_AP_TO_SCP;
162 cmd->id = SCPI_CMD_SET_CSS_POWER_STATE;
163 cmd->set = SCPI_SET_NORMAL;
164 cmd->sender = 0;
165 cmd->size = sizeof(state);
166 /* Populate the command payload */
167 payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
168 *payload_addr = state;
169 scpi_secure_message_send(sizeof(state));
170 /*
171 * SCP does not reply to this command in order to avoid MHU interrupts
172 * from the sender, which could interfere with its power state request.
173 */
174
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +0100175 scpi_secure_message_end();
176}
177
178/*
179 * Query and obtain CSS power state from SCP.
180 *
181 * In response to the query, SCP returns power states of all CPUs in all
182 * clusters of the system. The returned response is then filtered based on the
183 * supplied MPIDR. Power states of requested cluster and CPUs within are updated
184 * via. supplied non-NULL pointer arguments.
185 *
186 * Returns 0 on success, or -1 on errors.
187 */
188int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
189 unsigned int *cluster_state_p)
190{
191 scpi_cmd_t *cmd;
192 scpi_cmd_t response;
193 int power_state, cpu, cluster, rc = -1;
194
195 /*
196 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
197 * for only up to 0xf clusters, and 8 CPUs per cluster
198 */
199 cpu = mpidr & MPIDR_AFFLVL_MASK;
200 cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
201 if (cpu >= 8 || cluster >= 0xf)
202 return -1;
203
204 scpi_secure_message_start();
205
206 /* Populate request headers */
207 cmd = memset(SCPI_CMD_HEADER_AP_TO_SCP, 0, sizeof(*cmd));
208 cmd->id = SCPI_CMD_GET_CSS_POWER_STATE;
209
210 /*
211 * Send message and wait for SCP's response
212 */
213 scpi_secure_message_send(0);
214 scpi_secure_message_receive(&response);
215
216 if (response.status != SCP_OK)
217 goto exit;
218
219 /* Validate SCP response */
220 if (!CHECK_RESPONSE(response, cluster))
221 goto exit;
222
223 /* Extract power states for required cluster */
224 power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
225 if (CLUSTER_ID(power_state) != cluster)
226 goto exit;
227
228 /* Update power state via. pointers */
229 if (cluster_state_p)
230 *cluster_state_p = CLUSTER_POWER_STATE(power_state);
231 if (cpu_state_p)
232 *cpu_state_p = CPU_POWER_STATE(power_state);
233 rc = 0;
234
235exit:
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000236 scpi_secure_message_end();
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +0100237 return rc;
Dan Handley9df48042015-03-19 18:58:55 +0000238}
239
240uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
241{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000242 scpi_cmd_t *cmd;
243 uint8_t *payload_addr;
244 scpi_cmd_t response;
245
246 scpi_secure_message_start();
Dan Handley9df48042015-03-19 18:58:55 +0000247
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000248 /* Populate the command header */
249 cmd = SCPI_CMD_HEADER_AP_TO_SCP;
250 cmd->id = SCPI_CMD_SYS_POWER_STATE;
251 cmd->set = 0;
252 cmd->sender = 0;
253 cmd->size = sizeof(*payload_addr);
254 /* Populate the command payload */
255 payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
256 *payload_addr = system_state & 0xff;
257 scpi_secure_message_send(sizeof(*payload_addr));
258
259 scpi_secure_message_receive(&response);
260
Dan Handley9df48042015-03-19 18:58:55 +0000261 scpi_secure_message_end();
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000262
263 return response.status;
Dan Handley9df48042015-03-19 18:58:55 +0000264}