/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <runtime_svc.h>
#include <debug.h>
#include "psci_private.h"

/*******************************************************************************
 * PSCI frontend API for servicing SMCs. Described in the PSCI spec.
 ******************************************************************************/
int psci_cpu_on(unsigned long target_cpu,
		unsigned long entrypoint,
		unsigned long context_id)
{
	int rc;
	unsigned int start_afflvl, end_afflvl;
	entry_point_info_t ep;

	/* Determine whether the cpu exists or not */
	rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
	if (rc != PSCI_E_SUCCESS) {
		return PSCI_E_INVALID_PARAMS;
	}

	/* Validate the entrypoint using platform pm_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/*
	 * Verify and derive the re-entry information for the non-secure
	 * world from the non-secure state from which this call originated.
	 */
	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * To turn this cpu on, specify which affinity
	 * levels need to be turned on
	 */
	start_afflvl = MPIDR_AFFLVL0;
	end_afflvl = get_max_afflvl();
	rc = psci_afflvl_on(target_cpu,
			    &ep,
			    start_afflvl,
			    end_afflvl);

	return rc;
}
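
/*
 * Illustrative example (not part of the original sources): a normal world
 * caller is expected to request CPU_ON through an SMC. Under the PSCI/SMC
 * calling conventions (the exact wrapper is OS specific), the AArch64 call
 * roughly looks like:
 *
 *	x0 = PSCI_CPU_ON_AARCH64;	// function id
 *	x1 = target_cpu;		// MPIDR of the cpu to power on
 *	x2 = entrypoint;		// physical address to enter in the
 *					// non-secure world
 *	x3 = context_id;		// handed to the new cpu in x0
 *	smc	#0;			// return code comes back in x0
 */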

unsigned int psci_version(void)
{
	return PSCI_MAJOR_VER | PSCI_MINOR_VER;
}
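
/*
 * Illustrative note (not in the original sources): assuming the standard PSCI
 * version encoding, PSCI_MAJOR_VER is pre-shifted into bits[31:16] and
 * PSCI_MINOR_VER occupies bits[15:0], so a caller can decode the value
 * returned above as, for example:
 *
 *	uint32_t ver = psci_version();
 *	uint32_t major = (ver >> 16) & 0xffff;
 *	uint32_t minor = ver & 0xffff;
 */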

int psci_cpu_suspend(unsigned int power_state,
		     unsigned long entrypoint,
		     unsigned long context_id)
{
	int rc;
	unsigned int target_afflvl, pstate_type;
	entry_point_info_t ep;

	/* Check SBZ bits in power state are zero */
	if (psci_validate_power_state(power_state))
		return PSCI_E_INVALID_PARAMS;
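
	/*
	 * Illustrative note (not in the original sources, assuming the
	 * default PSCI v0.2 power_state format): the helpers used below
	 * extract the following fields from 'power_state':
	 *
	 *	bits[15:0]  - platform specific state id
	 *	bit[16]     - state type (0: standby, 1: power down)
	 *	bits[25:24] - target affinity level
	 *
	 * All other bits are SBZ, which is what psci_validate_power_state()
	 * has just checked.
	 */
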
	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);
	if (target_afflvl > get_max_afflvl())
		return PSCI_E_INVALID_PARAMS;

	/* Validate the power_state using platform pm_ops */
	if (psci_plat_pm_ops->validate_power_state) {
		rc = psci_plat_pm_ops->validate_power_state(power_state);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/* Validate the entrypoint using platform pm_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/* Determine the 'state type' in the 'power_state' parameter */
	pstate_type = psci_get_pstate_type(power_state);

	/*
	 * Ensure that we have a platform specific handler for entering
	 * a standby state.
	 */
	if (pstate_type == PSTATE_TYPE_STANDBY) {
		if (!psci_plat_pm_ops->affinst_standby)
			return PSCI_E_INVALID_PARAMS;

		psci_plat_pm_ops->affinst_standby(power_state);
		return PSCI_E_SUCCESS;
	}

	/*
	 * Verify and derive the re-entry information for the non-secure
	 * world from the non-secure state from which this call originated.
	 */
	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* Save PSCI power state parameter for the core in suspend context */
	psci_set_suspend_power_state(power_state);

	/*
	 * Do what is needed to enter the power down state. Upon success,
	 * enter the final wfi which will power down this CPU.
	 */
	psci_afflvl_suspend(&ep,
			    MPIDR_AFFLVL0,
			    target_afflvl);

	/* Reset PSCI power state parameter for the core. */
	psci_set_suspend_power_state(PSCI_INVALID_DATA);
	return PSCI_E_SUCCESS;
}

int psci_cpu_off(void)
{
	int rc;
	int target_afflvl = get_max_afflvl();

	/*
	 * Traverse from the highest to the lowest affinity level. When the
	 * lowest affinity level is hit, all the locks are acquired. State
	 * management is done immediately, followed by cpu, cluster, ...,
	 * target_afflvl specific actions as this function unwinds back.
	 */
	rc = psci_afflvl_off(MPIDR_AFFLVL0, target_afflvl);

	/*
	 * The only error cpu_off can return is E_DENIED. So check if that's
	 * indeed the case.
	 */
	assert(rc == PSCI_E_DENIED);

	return rc;
}
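
/*
 * Note (added for clarity, not in the original sources): when
 * psci_afflvl_off() succeeds, the calling cpu is powered down inside that
 * call and psci_cpu_off() does not return. The only value psci_cpu_off()
 * can hand back to the SMC handler is therefore PSCI_E_DENIED, which is
 * what its assertion checks.
 */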

int psci_affinity_info(unsigned long target_affinity,
		       unsigned int lowest_affinity_level)
{
	int rc = PSCI_E_INVALID_PARAMS;
	unsigned int aff_state;
	aff_map_node_t *node;

	if (lowest_affinity_level > get_max_afflvl())
		return rc;

	node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
	if (node && (node->state & PSCI_AFF_PRESENT)) {

		/*
		 * TODO: For affinity levels higher than 0, i.e. above the
		 * cpu level, the state will always be either ON or OFF.
		 * Need to investigate how critical it is to support
		 * ON_PENDING here.
		 */
		aff_state = psci_get_state(node);

		/* A suspended cpu is available & on for the OS */
		if (aff_state == PSCI_STATE_SUSPEND) {
			aff_state = PSCI_STATE_ON;
		}

		rc = aff_state;
	}

	return rc;
}

/* Unimplemented */
int psci_migrate(unsigned int target_cpu)
{
	return PSCI_E_NOT_SUPPORTED;
}

/* Unimplemented */
unsigned int psci_migrate_info_type(void)
{
	return PSCI_TOS_NOT_PRESENT_MP;
}

unsigned long psci_migrate_info_up_cpu(void)
{
	/*
	 * Return value of this currently unsupported call depends upon
	 * what psci_migrate_info_type() returns.
	 */
	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * PSCI top level handler for servicing SMCs.
 ******************************************************************************/
uint64_t psci_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	if (is_caller_secure(flags))
		SMC_RET1(handle, SMC_UNK);
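
	/*
	 * Illustrative note (not in the original sources): per the SMC
	 * calling convention, bit[30] of the function id selects the calling
	 * convention, 0 for SMC32 and 1 for SMC64. The check below extracts
	 * that bit via FUNCID_CC_SHIFT/FUNCID_CC_MASK to decide whether the
	 * upper halves of the parameter registers must be ignored.
	 */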
	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
		/* 32-bit PSCI function, clear top parameter bits */

		x1 = (uint32_t)x1;
		x2 = (uint32_t)x2;
		x3 = (uint32_t)x3;

		switch (smc_fid) {
		case PSCI_VERSION:
			SMC_RET1(handle, psci_version());

		case PSCI_CPU_OFF:
			SMC_RET1(handle, psci_cpu_off());

		case PSCI_CPU_SUSPEND_AARCH32:
			SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));

		case PSCI_CPU_ON_AARCH32:
			SMC_RET1(handle, psci_cpu_on(x1, x2, x3));

		case PSCI_AFFINITY_INFO_AARCH32:
			SMC_RET1(handle, psci_affinity_info(x1, x2));

		case PSCI_MIG_AARCH32:
			SMC_RET1(handle, psci_migrate(x1));

		case PSCI_MIG_INFO_TYPE:
			SMC_RET1(handle, psci_migrate_info_type());

		case PSCI_MIG_INFO_UP_CPU_AARCH32:
			SMC_RET1(handle, psci_migrate_info_up_cpu());

		case PSCI_SYSTEM_OFF:
			psci_system_off();
			/* We should never return from psci_system_off() */

		case PSCI_SYSTEM_RESET:
			psci_system_reset();
			/* We should never return from psci_system_reset() */

		default:
			break;
		}
	} else {
		/* 64-bit PSCI function */

		switch (smc_fid) {
		case PSCI_CPU_SUSPEND_AARCH64:
			SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));

		case PSCI_CPU_ON_AARCH64:
			SMC_RET1(handle, psci_cpu_on(x1, x2, x3));

		case PSCI_AFFINITY_INFO_AARCH64:
			SMC_RET1(handle, psci_affinity_info(x1, x2));

		case PSCI_MIG_AARCH64:
			SMC_RET1(handle, psci_migrate(x1));

		case PSCI_MIG_INFO_UP_CPU_AARCH64:
			SMC_RET1(handle, psci_migrate_info_up_cpu());

		default:
			break;
		}
	}

	WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
	SMC_RET1(handle, SMC_UNK);
}