blob: 443e6af4abdfd0c95276b4dc86fae7e3b997202d [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dan Handleye83b0ca2014-01-14 18:17:09 +00002 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
Dan Handley2bd4ef22014-04-09 13:14:54 +010031#include <arch.h>
32#include <arch_helpers.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010033#include <assert.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010034#include <bl_common.h>
Dan Handleybcd60ba2014-04-17 18:53:42 +010035#include <bl31.h>
Achin Gupta0a9f7472014-02-09 17:48:12 +000036#include <context_mgmt.h>
Dan Handleyed6ff952014-05-14 17:44:19 +010037#include <platform.h>
Dan Handleybcd60ba2014-04-17 18:53:42 +010038#include <runtime_svc.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010039#include <stddef.h>
Dan Handley714a0d22014-04-09 13:13:04 +010040#include "psci_private.h"
Achin Gupta4f6ad662013-10-25 09:08:21 +010041
/*
 * Signature of a per-affinity-level CPU_ON handler:
 * (target cpu mpidr, affinity instance node, ns entrypoint, context id)
 * -> PSCI return code. Handlers are indexed by affinity level below.
 */
typedef int (*afflvl_on_handler_t)(unsigned long,
				   aff_map_node_t *,
				   unsigned long,
				   unsigned long);
46
47/*******************************************************************************
48 * This function checks whether a cpu which has been requested to be turned on
49 * is OFF to begin with.
50 ******************************************************************************/
Dan Handleye2712bc2014-04-10 15:37:22 +010051static int cpu_on_validate_state(aff_map_node_t *node)
Achin Gupta4f6ad662013-10-25 09:08:21 +010052{
53 unsigned int psci_state;
54
55 /* Get the raw psci state */
Achin Gupta75f73672013-12-05 16:33:10 +000056 psci_state = psci_get_state(node);
Achin Gupta4f6ad662013-10-25 09:08:21 +010057
58 if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
59 return PSCI_E_ALREADY_ON;
60
61 if (psci_state == PSCI_STATE_ON_PENDING)
62 return PSCI_E_ON_PENDING;
63
64 assert(psci_state == PSCI_STATE_OFF);
65 return PSCI_E_SUCCESS;
66}
67
/*******************************************************************************
 * Handler routine to turn a cpu on. It takes care of any generic, architectural
 * or platform specific setup required.
 * target_cpu    : mpidr of the cpu to be powered on
 * cpu_node      : topology tree node for the target cpu (level MPIDR_AFFLVL0)
 * ns_entrypoint : non-secure address the cpu will resume execution at
 * context_id    : opaque value to hand back to the non-secure caller
 * Returns PSCI_E_SUCCESS or a PSCI error code.
 * TODO: Split this code across separate handlers for each type of setup?
 ******************************************************************************/
static int psci_afflvl0_on(unsigned long target_cpu,
			   aff_map_node_t *cpu_node,
			   unsigned long ns_entrypoint,
			   unsigned long context_id)
{
	unsigned int index, plat_state;
	unsigned long psci_entrypoint;
	int rc;

	/* Sanity check to safeguard against data corruption */
	assert(cpu_node->level == MPIDR_AFFLVL0);

	/*
	 * Generic management: Ensure that the cpu is off to be
	 * turned on
	 */
	rc = cpu_on_validate_state(cpu_node);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it's expected to assert within
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on)
		psci_spd_pm->svc_on(target_cpu);

	/*
	 * Arch. management: Derive the re-entry information for
	 * the non-secure world from the non-secure state from
	 * where this call originated. The stash is indexed by the
	 * cpu's position in the topology tree ('data' field).
	 */
	index = cpu_node->data;
	rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* Set the secure world (EL3) re-entry point after BL1 */
	psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;

	/*
	 * State management: Set this cpu's state as ON PENDING. Done before
	 * the platform power-on so that the wakeup path
	 * (psci_afflvl0_on_finish) finds the state it asserts on.
	 */
	psci_set_state(cpu_node, PSCI_STATE_ON_PENDING);

	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	if (psci_plat_pm_ops->affinst_on) {

		/* Get the current physical state of this cpu */
		plat_state = psci_get_phys_state(cpu_node);
		rc = psci_plat_pm_ops->affinst_on(target_cpu,
						  psci_entrypoint,
						  ns_entrypoint,
						  cpu_node->level,
						  plat_state);
	}

	return rc;
}
135
136/*******************************************************************************
137 * Handler routine to turn a cluster on. It takes care or any generic, arch.
138 * or platform specific setup required.
139 * TODO: Split this code across separate handlers for each type of setup?
140 ******************************************************************************/
141static int psci_afflvl1_on(unsigned long target_cpu,
Dan Handleye2712bc2014-04-10 15:37:22 +0100142 aff_map_node_t *cluster_node,
Achin Gupta4f6ad662013-10-25 09:08:21 +0100143 unsigned long ns_entrypoint,
144 unsigned long context_id)
145{
146 int rc = PSCI_E_SUCCESS;
147 unsigned int plat_state;
148 unsigned long psci_entrypoint;
149
150 assert(cluster_node->level == MPIDR_AFFLVL1);
151
152 /*
153 * There is no generic and arch. specific cluster
154 * management required
155 */
156
Achin Gupta75f73672013-12-05 16:33:10 +0000157 /* State management: Is not required while turning a cluster on */
158
Achin Gupta4f6ad662013-10-25 09:08:21 +0100159 /*
160 * Plat. management: Give the platform the current state
161 * of the target cpu to allow it to perform the necessary
162 * steps to power on.
163 */
164 if (psci_plat_pm_ops->affinst_on) {
Achin Gupta75f73672013-12-05 16:33:10 +0000165 plat_state = psci_get_phys_state(cluster_node);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100166 psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
167 rc = psci_plat_pm_ops->affinst_on(target_cpu,
168 psci_entrypoint,
169 ns_entrypoint,
170 cluster_node->level,
171 plat_state);
172 }
173
174 return rc;
175}
176
177/*******************************************************************************
178 * Handler routine to turn a cluster of clusters on. It takes care or any
179 * generic, arch. or platform specific setup required.
180 * TODO: Split this code across separate handlers for each type of setup?
181 ******************************************************************************/
182static int psci_afflvl2_on(unsigned long target_cpu,
Dan Handleye2712bc2014-04-10 15:37:22 +0100183 aff_map_node_t *system_node,
Achin Gupta4f6ad662013-10-25 09:08:21 +0100184 unsigned long ns_entrypoint,
185 unsigned long context_id)
186{
187 int rc = PSCI_E_SUCCESS;
188 unsigned int plat_state;
189 unsigned long psci_entrypoint;
190
191 /* Cannot go beyond affinity level 2 in this psci imp. */
192 assert(system_node->level == MPIDR_AFFLVL2);
193
194 /*
195 * There is no generic and arch. specific system management
196 * required
197 */
198
Achin Gupta75f73672013-12-05 16:33:10 +0000199 /* State management: Is not required while turning a system on */
200
Achin Gupta4f6ad662013-10-25 09:08:21 +0100201 /*
202 * Plat. management: Give the platform the current state
203 * of the target cpu to allow it to perform the necessary
204 * steps to power on.
205 */
206 if (psci_plat_pm_ops->affinst_on) {
Achin Gupta75f73672013-12-05 16:33:10 +0000207 plat_state = psci_get_phys_state(system_node);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100208 psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
209 rc = psci_plat_pm_ops->affinst_on(target_cpu,
210 psci_entrypoint,
211 ns_entrypoint,
212 system_node->level,
213 plat_state);
214 }
215
216 return rc;
217}
218
/*
 * Private table making these handlers accessible by indexing with the
 * affinity level (MPIDR_AFFLVL0 .. MPIDR_AFFLVL2).
 */
static const afflvl_on_handler_t psci_afflvl_on_handlers[] = {
	psci_afflvl0_on,
	psci_afflvl1_on,
	psci_afflvl2_on,
};
225
/*******************************************************************************
 * This function takes an array of pointers to affinity instance nodes in the
 * topology tree and calls the on handler for the corresponding affinity
 * levels. Levels are visited in descending order (highest first) and the
 * walk stops at the first failure. If no level between start_afflvl and
 * end_afflvl has a node, PSCI_E_INVALID_PARAMS is returned.
 ******************************************************************************/
static int psci_call_on_handlers(mpidr_aff_map_nodes_t target_cpu_nodes,
				 int start_afflvl,
				 int end_afflvl,
				 unsigned long target_cpu,
				 unsigned long entrypoint,
				 unsigned long context_id)
{
	int rc = PSCI_E_INVALID_PARAMS, level;
	aff_map_node_t *node;

	/* Descend from the highest to the lowest requested affinity level */
	for (level = end_afflvl; level >= start_afflvl; level--) {
		node = target_cpu_nodes[level];
		if (node == NULL)
			continue;

		/*
		 * TODO: In case of an error should there be a way
		 * of undoing what we might have setup at higher
		 * affinity levels.
		 */
		rc = psci_afflvl_on_handlers[level](target_cpu,
						    node,
						    entrypoint,
						    context_id);
		if (rc != PSCI_E_SUCCESS)
			break;
	}

	return rc;
}
261
/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It traverses through all the affinity levels performing generic,
 * architectural, platform setup and state management e.g. for a cpu that is
 * to be powered on, it will ensure that enough information is stashed for it
 * to resume execution in the non-secure security state.
 *
 * The state of all the relevant affinity levels is changed after calling the
 * affinity level specific handlers as their actions would depend upon the state
 * the affinity level is currently in.
 *
 * The affinity level specific handlers are called in descending order i.e. from
 * the highest to the lowest affinity level implemented by the platform because
 * to turn on affinity level X it is necessary to turn on affinity level X + 1
 * first.
 *
 * Returns PSCI_E_SUCCESS, or the error from node lookup or from the first
 * failing level handler.
 ******************************************************************************/
int psci_afflvl_on(unsigned long target_cpu,
		   unsigned long entrypoint,
		   unsigned long context_id,
		   int start_afflvl,
		   int end_afflvl)
{
	int rc = PSCI_E_SUCCESS;
	mpidr_aff_map_nodes_t target_cpu_nodes;
	/* mpidr of the calling cpu, used only as the lock-owner identity */
	unsigned long mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;

	/*
	 * Collect the pointers to the nodes in the topology tree for
	 * each affinity instance in the mpidr. If this function does
	 * not return successfully then either the mpidr or the affinity
	 * levels are incorrect.
	 */
	rc = psci_get_aff_map_nodes(target_cpu,
				    start_afflvl,
				    end_afflvl,
				    target_cpu_nodes);
	if (rc != PSCI_E_SUCCESS)
		return rc;


	/*
	 * This function acquires the lock corresponding to each affinity
	 * level so that by the time all locks are taken, the system topology
	 * is snapshot and state management can be done safely.
	 */
	psci_acquire_afflvl_locks(mpidr,
				  start_afflvl,
				  end_afflvl,
				  target_cpu_nodes);

	/* Perform generic, architecture and platform specific handling. */
	rc = psci_call_on_handlers(target_cpu_nodes,
				   start_afflvl,
				   end_afflvl,
				   target_cpu,
				   entrypoint,
				   context_id);

	/*
	 * This loop releases the lock corresponding to each affinity level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_afflvl_locks(mpidr,
				  start_afflvl,
				  end_afflvl,
				  target_cpu_nodes);

	return rc;
}
331
/*******************************************************************************
 * The following functions finish an earlier affinity power on request. They
 * are called by the common finisher routine in psci_common.c. They run on the
 * newly powered-on cpu itself, so the order of the steps below (platform
 * setup, MMU enable, vector/context setup, SPD notification) matters.
 ******************************************************************************/
static unsigned int psci_afflvl0_on_finish(unsigned long mpidr,
					   aff_map_node_t *cpu_node)
{
	/*
	 * NOTE(review): rc is unsigned but affinst_on_finish returns int;
	 * this relies on the assert below that rc == PSCI_E_SUCCESS.
	 */
	unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Ensure we have been explicitly woken up by another cpu */
	state = psci_get_state(cpu_node);
	assert(state == PSCI_STATE_ON_PENDING);

	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	if (psci_plat_pm_ops->affinst_on_finish) {

		/* Get the physical state of this cpu */
		plat_state = get_phys_state(state);
		rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
							 cpu_node->level,
							 plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	/*
	 * Arch. management: Turn on mmu & restore architectural state
	 */
	bl31_plat_enable_mmu();

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch.initialization
	 * to run in the non-secure address space.
	 */
	bl31_arch_setup();

	/*
	 * Use the more complex exception vectors to enable SPD
	 * initialisation. SP_EL3 should point to a 'cpu_context'
	 * structure. The calling cpu should have set the
	 * context already
	 */
	assert(cm_get_context(NON_SECURE));
	cm_set_next_eret_context(NON_SECURE);
	cm_init_pcpu_ptr_cache();
	write_vbar_el3((uint64_t) runtime_exceptions);

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it's expected to assert within
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
		psci_spd_pm->svc_on_finish(0);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way. First get the index
	 * for restoring the re-entry info
	 */
	index = cpu_node->data;
	psci_get_ns_entry_info(index);

	/* State management: mark this cpu as on */
	psci_set_state(cpu_node, PSCI_STATE_ON);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);

	return rc;
}
411
412static unsigned int psci_afflvl1_on_finish(unsigned long mpidr,
Dan Handleye2712bc2014-04-10 15:37:22 +0100413 aff_map_node_t *cluster_node)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100414{
Achin Gupta0959db52013-12-02 17:33:04 +0000415 unsigned int plat_state, rc = PSCI_E_SUCCESS;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100416
417 assert(cluster_node->level == MPIDR_AFFLVL1);
418
419 /*
420 * Plat. management: Perform the platform specific actions
421 * as per the old state of the cluster e.g. enabling
422 * coherency at the interconnect depends upon the state with
423 * which this cluster was powered up. If anything goes wrong
424 * then assert as there is no way to recover from this
425 * situation.
426 */
427 if (psci_plat_pm_ops->affinst_on_finish) {
Achin Gupta0959db52013-12-02 17:33:04 +0000428
429 /* Get the physical state of this cluster */
Achin Gupta75f73672013-12-05 16:33:10 +0000430 plat_state = psci_get_phys_state(cluster_node);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100431 rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
432 cluster_node->level,
433 plat_state);
434 assert(rc == PSCI_E_SUCCESS);
435 }
436
Achin Gupta75f73672013-12-05 16:33:10 +0000437 /* State management: Increment the cluster reference count */
438 psci_set_state(cluster_node, PSCI_STATE_ON);
439
Achin Gupta4f6ad662013-10-25 09:08:21 +0100440 return rc;
441}
442
443
444static unsigned int psci_afflvl2_on_finish(unsigned long mpidr,
Dan Handleye2712bc2014-04-10 15:37:22 +0100445 aff_map_node_t *system_node)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100446{
Achin Gupta0959db52013-12-02 17:33:04 +0000447 unsigned int plat_state, rc = PSCI_E_SUCCESS;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100448
449 /* Cannot go beyond this affinity level */
450 assert(system_node->level == MPIDR_AFFLVL2);
451
452 /*
453 * Currently, there are no architectural actions to perform
454 * at the system level.
455 */
456
457 /*
458 * Plat. management: Perform the platform specific actions
459 * as per the old state of the cluster e.g. enabling
460 * coherency at the interconnect depends upon the state with
461 * which this cluster was powered up. If anything goes wrong
462 * then assert as there is no way to recover from this
463 * situation.
464 */
465 if (psci_plat_pm_ops->affinst_on_finish) {
Achin Gupta0959db52013-12-02 17:33:04 +0000466
467 /* Get the physical state of the system */
Achin Gupta75f73672013-12-05 16:33:10 +0000468 plat_state = psci_get_phys_state(system_node);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100469 rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
470 system_node->level,
471 plat_state);
472 assert(rc == PSCI_E_SUCCESS);
473 }
474
Achin Gupta75f73672013-12-05 16:33:10 +0000475 /* State management: Increment the system reference count */
476 psci_set_state(system_node, PSCI_STATE_ON);
477
Achin Gupta4f6ad662013-10-25 09:08:21 +0100478 return rc;
479}
480
/*
 * Power-on finisher routines indexed by affinity level
 * (MPIDR_AFFLVL0 .. MPIDR_AFFLVL2); invoked by the common finisher
 * routine in psci_common.c.
 */
const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = {
	psci_afflvl0_on_finish,
	psci_afflvl1_on_finish,
	psci_afflvl2_on_finish,
};
486