blob: 2ed5760118a98f85f8f41c3ae4dd22a4308dd39a [file] [log] [blame]
Dan Handley9df48042015-03-19 18:58:55 +00001/*
Samarth Parikh59cfa132017-11-23 14:23:21 +05302 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
Dan Handley9df48042015-03-19 18:58:55 +00003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Dan Handley9df48042015-03-19 18:58:55 +00005 */
6
7#include <arch_helpers.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +00008#include <assert.h>
Dan Handley9df48042015-03-19 18:58:55 +00009#include <css_def.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000010#include <debug.h>
Dan Handley9df48042015-03-19 18:58:55 +000011#include <platform.h>
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000012#include <string.h>
Douglas Raillarda8954fc2017-01-26 15:54:44 +000013#include <utils.h>
Samarth Parikh59cfa132017-11-23 14:23:21 +053014#include "../mhu/css_mhu.h"
Dan Handley9df48042015-03-19 18:58:55 +000015#include "css_scpi.h"
16
Vikram Kanigiri72084192016-02-08 16:29:30 +000017#define SCPI_SHARED_MEM_SCP_TO_AP PLAT_CSS_SCP_COM_SHARED_MEM_BASE
18#define SCPI_SHARED_MEM_AP_TO_SCP (PLAT_CSS_SCP_COM_SHARED_MEM_BASE \
19 + 0x100)
Dan Handley9df48042015-03-19 18:58:55 +000020
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +010021/* Header and payload addresses for commands from AP to SCP */
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000022#define SCPI_CMD_HEADER_AP_TO_SCP \
23 ((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
24#define SCPI_CMD_PAYLOAD_AP_TO_SCP \
25 ((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))
Dan Handley9df48042015-03-19 18:58:55 +000026
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +010027/* Header and payload addresses for responses from SCP to AP */
28#define SCPI_RES_HEADER_SCP_TO_AP \
29 ((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
30#define SCPI_RES_PAYLOAD_SCP_TO_AP \
31 ((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))
32
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000033/* ID of the MHU slot used for the SCPI protocol */
34#define SCPI_MHU_SLOT_ID 0
Dan Handley9df48042015-03-19 18:58:55 +000035
/* Acquire the MHU and begin a secure SCPI transaction on the SCPI slot. */
static void scpi_secure_message_start(void)
{
	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
}
40
/*
 * Ring the SCPI slot's doorbell to tell SCP that a command (and any payload)
 * is ready in the AP-to-SCP shared memory area.
 *
 * Note: payload_size is not forwarded to the MHU layer here; the barrier
 * below is what guarantees the payload is visible to SCP before the doorbell.
 */
static void scpi_secure_message_send(size_t payload_size)
{
	/*
	 * Ensure that any write to the SCPI payload area is seen by SCP before
	 * we write to the MHU register. If these 2 writes were reordered by
	 * the CPU then SCP would read stale payload data
	 */
	dmbst();

	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
}
52
/*
 * Block until SCP signals a message, then copy the SCPI command header from
 * the SCP-to-AP shared memory area into *cmd.
 *
 * Panics if the MHU status indicates any slot other than the SCPI one, i.e.
 * a message arriving under an unexpected protocol.
 */
static void scpi_secure_message_receive(scpi_cmd_t *cmd)
{
	uint32_t mhu_status;

	assert(cmd != NULL);

	mhu_status = mhu_secure_message_wait();

	/* Expect an SCPI message, reject any other protocol */
	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
			mhu_status);
		panic();
	}

	/*
	 * Ensure that any read to the SCPI payload area is done after reading
	 * the MHU register. If these 2 reads were reordered then the CPU would
	 * read invalid payload data
	 */
	dmbld();

	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
}
77
/* Release the MHU, completing the secure SCPI transaction. */
static void scpi_secure_message_end(void)
{
	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
}
82
83int scpi_wait_ready(void)
84{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000085 scpi_cmd_t scpi_cmd;
86
87 VERBOSE("Waiting for SCP_READY command...\n");
88
Dan Handley9df48042015-03-19 18:58:55 +000089 /* Get a message from the SCP */
90 scpi_secure_message_start();
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000091 scpi_secure_message_receive(&scpi_cmd);
Dan Handley9df48042015-03-19 18:58:55 +000092 scpi_secure_message_end();
93
94 /* We are expecting 'SCP Ready', produce correct error if it's not */
Sandrine Bailleux04b66d82015-03-18 14:52:53 +000095 scpi_status_t status = SCP_OK;
96 if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
97 ERROR("Unexpected SCP command: expected command #%u, got command #%u\n",
98 SCPI_CMD_SCP_READY, scpi_cmd.id);
99 status = SCP_E_SUPPORT;
100 } else if (scpi_cmd.size != 0) {
101 ERROR("SCP_READY command has incorrect size: expected 0, got %u\n",
102 scpi_cmd.size);
103 status = SCP_E_SIZE;
104 }
Dan Handley9df48042015-03-19 18:58:55 +0000105
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000106 VERBOSE("Sending response for SCP_READY command\n");
107
108 /*
109 * Send our response back to SCP.
110 * We are using the same SCPI header, just update the status field.
111 */
112 scpi_cmd.status = status;
113 scpi_secure_message_start();
114 memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
115 scpi_secure_message_send(0);
116 scpi_secure_message_end();
Dan Handley9df48042015-03-19 18:58:55 +0000117
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000118 return status == SCP_OK ? 0 : -1;
Dan Handley9df48042015-03-19 18:58:55 +0000119}
120
Soby Mathew200fffd2016-10-21 11:34:59 +0100121void scpi_set_css_power_state(unsigned int mpidr,
122 scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
123 scpi_power_state_t css_state)
Dan Handley9df48042015-03-19 18:58:55 +0000124{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000125 scpi_cmd_t *cmd;
126 uint32_t state = 0;
127 uint32_t *payload_addr;
128
Summer Qin93c812f2017-02-28 16:46:17 +0000129#if ARM_PLAT_MT
130 /*
131 * The current SCPI driver only caters for single-threaded platforms.
132 * Hence we ignore the thread ID (which is always 0) for such platforms.
133 */
134 state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f; /* CPU ID */
135 state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4; /* Cluster ID */
136#else
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000137 state |= mpidr & 0x0f; /* CPU ID */
Dan Handley9df48042015-03-19 18:58:55 +0000138 state |= (mpidr & 0xf00) >> 4; /* Cluster ID */
Summer Qin93c812f2017-02-28 16:46:17 +0000139#endif /* ARM_PLAT_MT */
140
Dan Handley9df48042015-03-19 18:58:55 +0000141 state |= cpu_state << 8;
142 state |= cluster_state << 12;
143 state |= css_state << 16;
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000144
145 scpi_secure_message_start();
146
147 /* Populate the command header */
148 cmd = SCPI_CMD_HEADER_AP_TO_SCP;
149 cmd->id = SCPI_CMD_SET_CSS_POWER_STATE;
150 cmd->set = SCPI_SET_NORMAL;
151 cmd->sender = 0;
152 cmd->size = sizeof(state);
153 /* Populate the command payload */
154 payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
155 *payload_addr = state;
156 scpi_secure_message_send(sizeof(state));
157 /*
158 * SCP does not reply to this command in order to avoid MHU interrupts
159 * from the sender, which could interfere with its power state request.
160 */
161
Jeenu Viswambharanb1f68092016-08-04 12:44:52 +0100162 scpi_secure_message_end();
163}
164
/*
 * Query and obtain CSS power state from SCP.
 *
 * In response to the query, SCP returns power states of all CPUs in all
 * clusters of the system. The returned response is then filtered based on the
 * supplied MPIDR. Power states of requested cluster and CPUs within are updated
 * via. supplied non-NULL pointer arguments.
 *
 * Returns 0 on success, or -1 on errors.
 */
int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
		unsigned int *cluster_state_p)
{
	scpi_cmd_t *cmd;
	scpi_cmd_t response;
	int power_state, cpu, cluster, rc = -1;

	/*
	 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
	 * for only up to 0xf clusters, and 8 CPUs per cluster
	 */
#if ARM_PLAT_MT
	/*
	 * The current SCPI driver only caters for single-threaded platforms.
	 * Hence we ignore the thread ID (which is always 0) for such platforms.
	 */
	cpu = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
#else
	cpu = mpidr & MPIDR_AFFLVL_MASK;
	cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
#endif /* ARM_PLAT_MT */
	/* Reject out-of-range identities before touching the MHU */
	if (cpu >= 8 || cluster >= 0xf)
		return -1;

	scpi_secure_message_start();

	/* Populate request headers (zeroed first so unused fields are 0) */
	zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
	cmd->id = SCPI_CMD_GET_CSS_POWER_STATE;

	/*
	 * Send message and wait for SCP's response
	 */
	scpi_secure_message_send(0);
	scpi_secure_message_receive(&response);

	if (response.status != SCP_OK)
		goto exit;

	/* Validate SCP response */
	if (!CHECK_RESPONSE(response, cluster))
		goto exit;

	/*
	 * Extract power states for required cluster. The payload is indexed
	 * as one 16-bit entry per cluster — presumably each entry encodes the
	 * cluster ID plus its power state; verify against the SCPI spec.
	 */
	power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
	if (CLUSTER_ID(power_state) != cluster)
		goto exit;

	/* Update power state via. pointers (only those supplied non-NULL) */
	if (cluster_state_p)
		*cluster_state_p = CLUSTER_POWER_STATE(power_state);
	if (cpu_state_p)
		*cpu_state_p = CPU_POWER_STATE(power_state);
	rc = 0;

exit:
	/* Always release the MHU, whether the query succeeded or not */
	scpi_secure_message_end();
	return rc;
}
236
237uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
238{
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000239 scpi_cmd_t *cmd;
240 uint8_t *payload_addr;
241 scpi_cmd_t response;
242
243 scpi_secure_message_start();
Dan Handley9df48042015-03-19 18:58:55 +0000244
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000245 /* Populate the command header */
246 cmd = SCPI_CMD_HEADER_AP_TO_SCP;
247 cmd->id = SCPI_CMD_SYS_POWER_STATE;
248 cmd->set = 0;
249 cmd->sender = 0;
250 cmd->size = sizeof(*payload_addr);
251 /* Populate the command payload */
252 payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
253 *payload_addr = system_state & 0xff;
254 scpi_secure_message_send(sizeof(*payload_addr));
255
256 scpi_secure_message_receive(&response);
257
Dan Handley9df48042015-03-19 18:58:55 +0000258 scpi_secure_message_end();
Sandrine Bailleux04b66d82015-03-18 14:52:53 +0000259
260 return response.status;
Dan Handley9df48042015-03-19 18:58:55 +0000261}