/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <runtime_svc.h>
#include <std_svc.h>
#include <debug.h>
#include "psci_private.h"

/*******************************************************************************
 * PSCI frontend api for servicing SMCs. Described in the PSCI spec.
 ******************************************************************************/
int psci_cpu_on(unsigned long target_cpu,
		unsigned long entrypoint,
		unsigned long context_id)
{
	int rc;
	unsigned int start_afflvl, end_afflvl;
	entry_point_info_t ep;

	/* Determine if the cpu exists or not */
	rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
	if (rc != PSCI_E_SUCCESS) {
		return PSCI_E_INVALID_PARAMS;
	}

	/* Validate the entrypoint using platform pm_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/*
	 * Verify and derive the re-entry information for the non-secure
	 * world from the non-secure state from which this call originated.
	 */
	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * To turn this cpu on, specify which affinity levels need to be
	 * turned on.
	 */
	start_afflvl = MPIDR_AFFLVL0;
	end_afflvl = get_max_afflvl();
	rc = psci_afflvl_on(target_cpu,
			    &ep,
			    start_afflvl,
			    end_afflvl);

	return rc;
}

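/*
 * Report the implemented PSCI version. Per the PSCI specification, the major
 * version lives in bits [31:16] and the minor version in bits [15:0] of the
 * return value; the PSCI_MAJOR_VER macro is assumed to already carry that
 * shift, so the two macros are simply OR'ed together.
 */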
unsigned int psci_version(void)
{
	return PSCI_MAJOR_VER | PSCI_MINOR_VER;
}

int psci_cpu_suspend(unsigned int power_state,
		     unsigned long entrypoint,
		     unsigned long context_id)
{
	int rc;
	unsigned int target_afflvl, pstate_type;
	entry_point_info_t ep;

	/* Check that the SBZ (should-be-zero) bits in the power state are zero */
	if (psci_validate_power_state(power_state))
		return PSCI_E_INVALID_PARAMS;

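	/*
	 * For reference: in the original power_state format used by this
	 * implementation (see psci_features() below), the StateID occupies
	 * bits [15:0], the state type (standby vs. power down) occupies
	 * bit [16] and the target affinity level bits [25:24]; the accessors
	 * used below extract the latter two fields.
	 */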
	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);
	if (target_afflvl > get_max_afflvl())
		return PSCI_E_INVALID_PARAMS;

	/* Validate the power_state using platform pm_ops */
	if (psci_plat_pm_ops->validate_power_state) {
		rc = psci_plat_pm_ops->validate_power_state(power_state);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/* Validate the entrypoint using platform pm_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/* Determine the 'state type' in the 'power_state' parameter */
	pstate_type = psci_get_pstate_type(power_state);

	/*
	 * Ensure that we have a platform specific handler for entering
	 * a standby state.
	 */
	if (pstate_type == PSTATE_TYPE_STANDBY) {
		if (!psci_plat_pm_ops->affinst_standby)
			return PSCI_E_INVALID_PARAMS;

		psci_plat_pm_ops->affinst_standby(power_state);
		return PSCI_E_SUCCESS;
	}

	/*
	 * Verify and derive the re-entry information for the non-secure
	 * world from the non-secure state from which this call originated.
	 */
	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* Save the PSCI power state parameter for the core in the suspend context */
	psci_set_suspend_power_state(power_state);

	/*
	 * Do what is needed to enter the power down state. Upon success,
	 * enter the final wfi which will power down this CPU.
	 */
	psci_afflvl_suspend(&ep,
			    MPIDR_AFFLVL0,
			    target_afflvl);

	/* Reset the PSCI power state parameter for the core. */
	psci_set_suspend_power_state(PSCI_INVALID_DATA);
	return PSCI_E_SUCCESS;
}

int psci_cpu_off(void)
{
	int rc;
	int target_afflvl = get_max_afflvl();

	/*
	 * Traverse from the highest to the lowest affinity level. When the
	 * lowest affinity level is hit, all the locks are acquired. State
	 * management is done immediately, followed by the cpu, cluster ...
	 * target_afflvl specific actions as this function unwinds back.
	 */
	rc = psci_afflvl_off(MPIDR_AFFLVL0, target_afflvl);

	/*
	 * The only error cpu_off can return is E_DENIED. So check if that's
	 * indeed the case.
	 */
	assert(rc == PSCI_E_DENIED);

	return rc;
}

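/*
 * AFFINITY_INFO: report the power state of the affinity instance identified
 * by target_affinity at the given affinity level. A suspended CPU is reported
 * as ON because, from the caller's point of view, it is still available.
 */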
int psci_affinity_info(unsigned long target_affinity,
		       unsigned int lowest_affinity_level)
{
	int rc = PSCI_E_INVALID_PARAMS;
	unsigned int aff_state;
	aff_map_node_t *node;

	if (lowest_affinity_level > get_max_afflvl())
		return rc;

	node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
	if (node && (node->state & PSCI_AFF_PRESENT)) {

		/*
		 * TODO: For affinity levels higher than 0 (i.e. above the cpu
		 * level), the state will always be either ON or OFF. Need to
		 * investigate how critical it is to support ON_PENDING here.
		 */
		aff_state = psci_get_state(node);

		/* A suspended cpu is available & on for the OS */
		if (aff_state == PSCI_STATE_SUSPEND) {
			aff_state = PSCI_STATE_ON;
		}

		rc = aff_state;
	}

	return rc;
}

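/*
 * MIGRATE: ask the Secure Payload Dispatcher to migrate a uniprocessor
 * Trusted OS to target_cpu. The request is only honoured when the Trusted OS
 * reports itself as migrate-capable and the call is made on the CPU where it
 * is currently resident.
 */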
int psci_migrate(unsigned long target_cpu)
{
	int rc;
	unsigned long resident_cpu_mpidr;

	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
	if (rc != PSCI_TOS_UP_MIG_CAP)
		return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
			 PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;

	/*
	 * Migrate should only be invoked on the CPU where
	 * the Secure OS is resident.
	 */
	if (resident_cpu_mpidr != read_mpidr_el1())
		return PSCI_E_NOT_PRESENT;

	/* Check the validity of the specified target cpu */
	rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
	if (rc != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	assert(psci_spd_pm && psci_spd_pm->svc_migrate);

	rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

	return rc;
}

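/*
 * MIGRATE_INFO_TYPE: report the multicore characteristics of the Trusted OS
 * as obtained from the Secure Payload Dispatcher, i.e. whether it is a
 * uniprocessor Trusted OS that can or cannot be migrated, or whether
 * migration is not required (for instance because no Trusted OS is present).
 */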
int psci_migrate_info_type(void)
{
	unsigned long resident_cpu_mpidr;

	return psci_spd_migrate_info(&resident_cpu_mpidr);
}

long psci_migrate_info_up_cpu(void)
{
	unsigned long resident_cpu_mpidr;
	int rc;

	/*
	 * The MPIDR returned by this function is only meaningful if
	 * psci_spd_migrate_info() reports a uniprocessor Trusted OS.
	 */
	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
	if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
		return PSCI_E_INVALID_PARAMS;

	return resident_cpu_mpidr;
}

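/*
 * PSCI_FEATURES: check the queried function id against the set of PSCI
 * capabilities advertised by this implementation (psci_caps) and, for
 * CPU_SUSPEND, additionally report the power_state format and whether
 * OS-initiated mode is supported via the feature flags.
 */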
int psci_features(unsigned int psci_fid)
{
	uint32_t local_caps = psci_caps;

	/* Check if it is a 64 bit function */
	if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
		local_caps &= PSCI_CAP_64BIT_MASK;

	/* Check for an invalid fid */
	if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid)
			&& is_psci_fid(psci_fid)))
		return PSCI_E_NOT_SUPPORTED;

	/* Check if the psci fid is supported or not */
	if (!(local_caps & define_psci_cap(psci_fid)))
		return PSCI_E_NOT_SUPPORTED;

	/* Format the feature flags */
	if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
			psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
		/*
		 * The trusted firmware uses the original power state format
		 * and does not support OS Initiated Mode.
		 */
		return (FF_PSTATE_ORIG << FF_PSTATE_SHIFT) |
			((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
	}

	/* Return 0 for all other fids */
	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * PSCI top level handler for servicing SMCs.
 ******************************************************************************/
uint64_t psci_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	if (is_caller_secure(flags))
		SMC_RET1(handle, SMC_UNK);

	/* Check the fid against the capabilities */
	if (!(psci_caps & define_psci_cap(smc_fid)))
		SMC_RET1(handle, SMC_UNK);

	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
		/* 32-bit PSCI function, clear top parameter bits */

		x1 = (uint32_t)x1;
		x2 = (uint32_t)x2;
		x3 = (uint32_t)x3;
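		/*
		 * Note: per the SMC calling convention, the upper 32 bits of
		 * the parameter registers are not guaranteed to be zero for a
		 * 32-bit (SMC32) call, hence the explicit truncation above
		 * before the parameters are passed on.
		 */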

		switch (smc_fid) {
		case PSCI_VERSION:
			SMC_RET1(handle, psci_version());

		case PSCI_CPU_OFF:
			SMC_RET1(handle, psci_cpu_off());

		case PSCI_CPU_SUSPEND_AARCH32:
			SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));

		case PSCI_CPU_ON_AARCH32:
			SMC_RET1(handle, psci_cpu_on(x1, x2, x3));

		case PSCI_AFFINITY_INFO_AARCH32:
			SMC_RET1(handle, psci_affinity_info(x1, x2));

		case PSCI_MIG_AARCH32:
			SMC_RET1(handle, psci_migrate(x1));

		case PSCI_MIG_INFO_TYPE:
			SMC_RET1(handle, psci_migrate_info_type());

		case PSCI_MIG_INFO_UP_CPU_AARCH32:
			SMC_RET1(handle, psci_migrate_info_up_cpu());

		case PSCI_SYSTEM_OFF:
			psci_system_off();
			/* We should never return from psci_system_off() */

		case PSCI_SYSTEM_RESET:
			psci_system_reset();
			/* We should never return from psci_system_reset() */

		case PSCI_FEATURES:
			SMC_RET1(handle, psci_features(x1));

		default:
			break;
		}
	} else {
		/* 64-bit PSCI function */

		switch (smc_fid) {
		case PSCI_CPU_SUSPEND_AARCH64:
			SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));

		case PSCI_CPU_ON_AARCH64:
			SMC_RET1(handle, psci_cpu_on(x1, x2, x3));

		case PSCI_AFFINITY_INFO_AARCH64:
			SMC_RET1(handle, psci_affinity_info(x1, x2));

		case PSCI_MIG_AARCH64:
			SMC_RET1(handle, psci_migrate(x1));

		case PSCI_MIG_INFO_UP_CPU_AARCH64:
			SMC_RET1(handle, psci_migrate_info_up_cpu());

		default:
			break;
		}
	}

	WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
	SMC_RET1(handle, SMC_UNK);
}