blob: 90a8939d985266903deb0185b7ff54490a456448 [file] [log] [blame]
Dan Handley9df48042015-03-19 18:58:55 +00001/*
Vikram Kanigiri72084192016-02-08 16:29:30 +00002 * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
Dan Handley9df48042015-03-19 18:58:55 +00003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch_helpers.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000032#include <assert.h>
Dan Handley9df48042015-03-19 18:58:55 +000033#include <css_def.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000034#include <debug.h>
Dan Handley9df48042015-03-19 18:58:55 +000035#include <platform.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000036#include <string.h>
Dan Handley9df48042015-03-19 18:58:55 +000037#include "css_mhu.h"
38#include "css_scpi.h"
39
/*
 * Layout of the shared SCP/AP communication memory: responses from the
 * SCP to the AP live at the base of the region, while commands from the
 * AP to the SCP are composed 0x100 bytes above it.
 */
#define SCPI_SHARED_MEM_SCP_TO_AP	PLAT_CSS_SCP_COM_SHARED_MEM_BASE
#define SCPI_SHARED_MEM_AP_TO_SCP	(PLAT_CSS_SCP_COM_SHARED_MEM_BASE \
						+ 0x100)

/* Header and payload addresses for commands from AP to SCP */
#define SCPI_CMD_HEADER_AP_TO_SCP		\
	((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
#define SCPI_CMD_PAYLOAD_AP_TO_SCP		\
	((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))

/* Header and payload addresses for responses from SCP to AP */
#define SCPI_RES_HEADER_SCP_TO_AP \
	((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
#define SCPI_RES_PAYLOAD_SCP_TO_AP \
	((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))

/* ID of the MHU slot used for the SCPI protocol */
#define SCPI_MHU_SLOT_ID		0
Dan Handley9df48042015-03-19 18:58:55 +000058
/*
 * Begin an SCPI transaction: acquire the MHU slot reserved for the SCPI
 * protocol before touching the shared command area.
 */
static void scpi_secure_message_start(void)
{
	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
}
63
/*
 * Notify the SCP that a command (of 'payload_size' bytes, already written
 * to the AP-to-SCP shared area) is ready to be consumed.
 */
static void scpi_secure_message_send(size_t payload_size)
{
	/* Ensure that any write to the SCPI payload area is seen by SCP before
	 * we write to the MHU register. If these 2 writes were reordered by
	 * the CPU then SCP would read stale payload data */
	dmbst();

	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
}
73
/*
 * Block until the SCP posts a message on the SCPI MHU slot, then copy the
 * SCPI header from the SCP-to-AP shared area into *cmd.
 *
 * Panics if the MHU reports activity on any slot other than the SCPI one,
 * since that would indicate an unexpected protocol on the channel.
 */
static void scpi_secure_message_receive(scpi_cmd_t *cmd)
{
	uint32_t mhu_status;

	assert(cmd != NULL);

	mhu_status = mhu_secure_message_wait();

	/* Expect an SCPI message, reject any other protocol */
	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
			mhu_status);
		panic();
	}

	/* Ensure that any read to the SCPI payload area is done after reading
	 * the MHU register. If these 2 reads were reordered then the CPU would
	 * read invalid payload data */
	dmbld();

	/* Only the fixed-size header is copied; payload (if any) stays in
	 * shared memory for the caller to read via SCPI_RES_PAYLOAD_SCP_TO_AP */
	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
}
96
/*
 * End an SCPI transaction: release the MHU slot reserved for the SCPI
 * protocol so the channel can be reused.
 */
static void scpi_secure_message_end(void)
{
	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
}
101
102int scpi_wait_ready(void)
103{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000104 scpi_cmd_t scpi_cmd;
105
106 VERBOSE("Waiting for SCP_READY command...\n");
107
Dan Handley9df48042015-03-19 18:58:55 +0000108 /* Get a message from the SCP */
109 scpi_secure_message_start();
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000110 scpi_secure_message_receive(&scpi_cmd);
Dan Handley9df48042015-03-19 18:58:55 +0000111 scpi_secure_message_end();
112
113 /* We are expecting 'SCP Ready', produce correct error if it's not */
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000114 scpi_status_t status = SCP_OK;
115 if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
116 ERROR("Unexpected SCP command: expected command #%u, got command #%u\n",
117 SCPI_CMD_SCP_READY, scpi_cmd.id);
118 status = SCP_E_SUPPORT;
119 } else if (scpi_cmd.size != 0) {
120 ERROR("SCP_READY command has incorrect size: expected 0, got %u\n",
121 scpi_cmd.size);
122 status = SCP_E_SIZE;
123 }
Dan Handley9df48042015-03-19 18:58:55 +0000124
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000125 VERBOSE("Sending response for SCP_READY command\n");
126
127 /*
128 * Send our response back to SCP.
129 * We are using the same SCPI header, just update the status field.
130 */
131 scpi_cmd.status = status;
132 scpi_secure_message_start();
133 memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
134 scpi_secure_message_send(0);
135 scpi_secure_message_end();
Dan Handley9df48042015-03-19 18:58:55 +0000136
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000137 return status == SCP_OK ? 0 : -1;
Dan Handley9df48042015-03-19 18:58:55 +0000138}
139
140void scpi_set_css_power_state(unsigned mpidr, scpi_power_state_t cpu_state,
141 scpi_power_state_t cluster_state, scpi_power_state_t css_state)
142{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000143 scpi_cmd_t *cmd;
144 uint32_t state = 0;
145 uint32_t *payload_addr;
146
147 state |= mpidr & 0x0f; /* CPU ID */
Dan Handley9df48042015-03-19 18:58:55 +0000148 state |= (mpidr & 0xf00) >> 4; /* Cluster ID */
149 state |= cpu_state << 8;
150 state |= cluster_state << 12;
151 state |= css_state << 16;
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000152
153 scpi_secure_message_start();
154
155 /* Populate the command header */
156 cmd = SCPI_CMD_HEADER_AP_TO_SCP;
157 cmd->id = SCPI_CMD_SET_CSS_POWER_STATE;
158 cmd->set = SCPI_SET_NORMAL;
159 cmd->sender = 0;
160 cmd->size = sizeof(state);
161 /* Populate the command payload */
162 payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
163 *payload_addr = state;
164 scpi_secure_message_send(sizeof(state));
165 /*
166 * SCP does not reply to this command in order to avoid MHU interrupts
167 * from the sender, which could interfere with its power state request.
168 */
169
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +0100170 scpi_secure_message_end();
171}
172
173/*
174 * Query and obtain CSS power state from SCP.
175 *
176 * In response to the query, SCP returns power states of all CPUs in all
177 * clusters of the system. The returned response is then filtered based on the
178 * supplied MPIDR. Power states of requested cluster and CPUs within are updated
179 * via. supplied non-NULL pointer arguments.
180 *
181 * Returns 0 on success, or -1 on errors.
182 */
183int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
184 unsigned int *cluster_state_p)
185{
186 scpi_cmd_t *cmd;
187 scpi_cmd_t response;
188 int power_state, cpu, cluster, rc = -1;
189
190 /*
191 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
192 * for only up to 0xf clusters, and 8 CPUs per cluster
193 */
194 cpu = mpidr & MPIDR_AFFLVL_MASK;
195 cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
196 if (cpu >= 8 || cluster >= 0xf)
197 return -1;
198
199 scpi_secure_message_start();
200
201 /* Populate request headers */
202 cmd = memset(SCPI_CMD_HEADER_AP_TO_SCP, 0, sizeof(*cmd));
203 cmd->id = SCPI_CMD_GET_CSS_POWER_STATE;
204
205 /*
206 * Send message and wait for SCP's response
207 */
208 scpi_secure_message_send(0);
209 scpi_secure_message_receive(&response);
210
211 if (response.status != SCP_OK)
212 goto exit;
213
214 /* Validate SCP response */
215 if (!CHECK_RESPONSE(response, cluster))
216 goto exit;
217
218 /* Extract power states for required cluster */
219 power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
220 if (CLUSTER_ID(power_state) != cluster)
221 goto exit;
222
223 /* Update power state via. pointers */
224 if (cluster_state_p)
225 *cluster_state_p = CLUSTER_POWER_STATE(power_state);
226 if (cpu_state_p)
227 *cpu_state_p = CPU_POWER_STATE(power_state);
228 rc = 0;
229
230exit:
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000231 scpi_secure_message_end();
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +0100232 return rc;
Dan Handley9df48042015-03-19 18:58:55 +0000233}
234
235uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
236{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000237 scpi_cmd_t *cmd;
238 uint8_t *payload_addr;
239 scpi_cmd_t response;
240
241 scpi_secure_message_start();
Dan Handley9df48042015-03-19 18:58:55 +0000242
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000243 /* Populate the command header */
244 cmd = SCPI_CMD_HEADER_AP_TO_SCP;
245 cmd->id = SCPI_CMD_SYS_POWER_STATE;
246 cmd->set = 0;
247 cmd->sender = 0;
248 cmd->size = sizeof(*payload_addr);
249 /* Populate the command payload */
250 payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
251 *payload_addr = system_state & 0xff;
252 scpi_secure_message_send(sizeof(*payload_addr));
253
254 scpi_secure_message_receive(&response);
255
Dan Handley9df48042015-03-19 18:58:55 +0000256 scpi_secure_message_end();
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000257
258 return response.status;
Dan Handley9df48042015-03-19 18:58:55 +0000259}