/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <context_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"

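/* Type of the per-affinity-level power on handlers invoked through psci_afflvl_on_handlers[] */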
typedef int (*afflvl_on_handler_t)(unsigned long,
                                   aff_map_node_t *,
                                   unsigned long,
                                   unsigned long);

/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(aff_map_node_t *node)
{
        unsigned int psci_state;

        /* Get the raw psci state */
        psci_state = psci_get_state(node);

        if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
                return PSCI_E_ALREADY_ON;

        if (psci_state == PSCI_STATE_ON_PENDING)
                return PSCI_E_ON_PENDING;

        assert(psci_state == PSCI_STATE_OFF);
        return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Handler routine to turn a cpu on. It takes care of any generic, architectural
 * or platform specific setup required.
 * TODO: Split this code across separate handlers for each type of setup?
 ******************************************************************************/
static int psci_afflvl0_on(unsigned long target_cpu,
                           aff_map_node_t *cpu_node,
                           unsigned long ns_entrypoint,
                           unsigned long context_id)
{
        unsigned int plat_state;
        unsigned long psci_entrypoint;
        uint32_t ns_scr_el3 = read_scr_el3();
        uint32_t ns_sctlr_el1 = read_sctlr_el1();
        int rc;

        /* Sanity check to safeguard against data corruption */
        assert(cpu_node->level == MPIDR_AFFLVL0);

        /*
         * Generic management: Ensure that the cpu is off before it
         * can be turned on.
         */
        rc = cpu_on_validate_state(cpu_node);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /*
         * Call the cpu on handler registered by the Secure Payload Dispatcher
         * to let it do any bookkeeping. If the handler encounters an error, it
         * is expected to assert within.
         */
        if (psci_spd_pm && psci_spd_pm->svc_on)
                psci_spd_pm->svc_on(target_cpu);

        /*
         * Arch. management: Derive the re-entry information for
         * the non-secure world from the non-secure state from
         * which this call originated.
         */
        rc = psci_save_ns_entry(target_cpu, ns_entrypoint, context_id,
                                ns_scr_el3, ns_sctlr_el1);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /* Set the secure world (EL3) re-entry point after BL1 */
        psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;

        /* State management: Set this cpu's state as ON PENDING */
        psci_set_state(cpu_node, PSCI_STATE_ON_PENDING);

        /*
         * Plat. management: Give the platform the current state
         * of the target cpu to allow it to perform the necessary
         * steps to power on.
         */
        if (psci_plat_pm_ops->affinst_on) {

                /* Get the current physical state of this cpu */
                plat_state = psci_get_phys_state(cpu_node);
                rc = psci_plat_pm_ops->affinst_on(target_cpu,
                                                  psci_entrypoint,
                                                  ns_entrypoint,
                                                  cpu_node->level,
                                                  plat_state);
        }

        return rc;
}

/*******************************************************************************
 * Handler routine to turn a cluster on. It takes care of any generic, arch.
 * or platform specific setup required.
 * TODO: Split this code across separate handlers for each type of setup?
 ******************************************************************************/
static int psci_afflvl1_on(unsigned long target_cpu,
                           aff_map_node_t *cluster_node,
                           unsigned long ns_entrypoint,
                           unsigned long context_id)
{
        int rc = PSCI_E_SUCCESS;
        unsigned int plat_state;
        unsigned long psci_entrypoint;

        assert(cluster_node->level == MPIDR_AFFLVL1);

        /*
         * There is no generic or arch. specific cluster
         * management required.
         */

        /* State management: not required while turning a cluster on */

        /*
         * Plat. management: Give the platform the current state
         * of the target cpu to allow it to perform the necessary
         * steps to power on.
         */
        if (psci_plat_pm_ops->affinst_on) {
                plat_state = psci_get_phys_state(cluster_node);
                psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
                rc = psci_plat_pm_ops->affinst_on(target_cpu,
                                                  psci_entrypoint,
                                                  ns_entrypoint,
                                                  cluster_node->level,
                                                  plat_state);
        }

        return rc;
}

/*******************************************************************************
 * Handler routine to turn a cluster of clusters on. It takes care of any
 * generic, arch. or platform specific setup required.
 * TODO: Split this code across separate handlers for each type of setup?
 ******************************************************************************/
static int psci_afflvl2_on(unsigned long target_cpu,
                           aff_map_node_t *system_node,
                           unsigned long ns_entrypoint,
                           unsigned long context_id)
{
        int rc = PSCI_E_SUCCESS;
        unsigned int plat_state;
        unsigned long psci_entrypoint;

        /* Cannot go beyond affinity level 2 in this psci imp. */
        assert(system_node->level == MPIDR_AFFLVL2);

        /*
         * There is no generic or arch. specific system management
         * required.
         */

        /* State management: not required while turning a system on */

        /*
         * Plat. management: Give the platform the current state
         * of the target cpu to allow it to perform the necessary
         * steps to power on.
         */
        if (psci_plat_pm_ops->affinst_on) {
                plat_state = psci_get_phys_state(system_node);
                psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
                rc = psci_plat_pm_ops->affinst_on(target_cpu,
                                                  psci_entrypoint,
                                                  ns_entrypoint,
                                                  system_node->level,
                                                  plat_state);
        }

        return rc;
}

/* Private data structure to make these handlers accessible through indexing */
static const afflvl_on_handler_t psci_afflvl_on_handlers[] = {
        psci_afflvl0_on,
        psci_afflvl1_on,
        psci_afflvl2_on,
};

/*******************************************************************************
 * This function takes an array of pointers to affinity instance nodes in the
 * topology tree and calls the on handler for the corresponding affinity
 * levels.
 ******************************************************************************/
static int psci_call_on_handlers(mpidr_aff_map_nodes_t target_cpu_nodes,
                                 int start_afflvl,
                                 int end_afflvl,
                                 unsigned long target_cpu,
                                 unsigned long entrypoint,
                                 unsigned long context_id)
{
        int rc = PSCI_E_INVALID_PARAMS, level;
        aff_map_node_t *node;

        for (level = end_afflvl; level >= start_afflvl; level--) {
                node = target_cpu_nodes[level];
                if (node == NULL)
                        continue;

                /*
                 * TODO: In case of an error, should there be a way
                 * of undoing what we might have set up at higher
                 * affinity levels?
                 */
                rc = psci_afflvl_on_handlers[level](target_cpu,
                                                    node,
                                                    entrypoint,
                                                    context_id);
                if (rc != PSCI_E_SUCCESS)
                        break;
        }

        return rc;
}

/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It traverses through all the affinity levels performing generic,
 * architectural, platform setup and state management e.g. for a cpu that is
 * to be powered on, it will ensure that enough information is stashed for it
 * to resume execution in the non-secure security state.
 *
 * The state of all the relevant affinity levels is changed after calling the
 * affinity level specific handlers as their actions would depend upon the state
 * the affinity level is currently in.
 *
 * The affinity level specific handlers are called in descending order i.e. from
 * the highest to the lowest affinity level implemented by the platform because
 * to turn on affinity level X it is necessary to turn on affinity level X + 1
 * first.
 ******************************************************************************/
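/*
 * Note: callers (e.g. the PSCI CPU_ON handler) typically pass start_afflvl as
 * MPIDR_AFFLVL0 and end_afflvl as the highest affinity level implemented by
 * the platform.
 */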
int psci_afflvl_on(unsigned long target_cpu,
                   unsigned long entrypoint,
                   unsigned long context_id,
                   int start_afflvl,
                   int end_afflvl)
{
        int rc = PSCI_E_SUCCESS;
        mpidr_aff_map_nodes_t target_cpu_nodes;

        /*
         * Collect the pointers to the nodes in the topology tree for
         * each affinity instance in the mpidr. If this function does
         * not return successfully then either the mpidr or the affinity
         * levels are incorrect.
         */
        rc = psci_get_aff_map_nodes(target_cpu,
                                    start_afflvl,
                                    end_afflvl,
                                    target_cpu_nodes);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /*
         * Acquire the lock corresponding to each affinity level so that by
         * the time all locks are taken, the system topology has been
         * snapshotted and state management can be done safely.
         */
        psci_acquire_afflvl_locks(start_afflvl,
                                  end_afflvl,
                                  target_cpu_nodes);

        /* Perform generic, architecture and platform specific handling. */
        rc = psci_call_on_handlers(target_cpu_nodes,
                                   start_afflvl,
                                   end_afflvl,
                                   target_cpu,
                                   entrypoint,
                                   context_id);

        /*
         * Release the locks corresponding to each affinity level in the
         * reverse order to which they were acquired.
         */
        psci_release_afflvl_locks(start_afflvl,
                                  end_afflvl,
                                  target_cpu_nodes);

        return rc;
}

/*******************************************************************************
 * The following functions finish an earlier affinity power on request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
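/* Power on finisher for affinity level 0, i.e. an individual cpu */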
static unsigned int psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
{
        unsigned int plat_state, state, rc;

        assert(cpu_node->level == MPIDR_AFFLVL0);

        /* Ensure we have been explicitly woken up by another cpu */
        state = psci_get_state(cpu_node);
        assert(state == PSCI_STATE_ON_PENDING);

        /*
         * Plat. management: Perform the platform specific actions
         * for this cpu e.g. enabling the gic or zeroing the mailbox
         * register. The actual state of this cpu has already been
         * changed.
         */
        if (psci_plat_pm_ops->affinst_on_finish) {

                /* Get the physical state of this cpu */
                plat_state = get_phys_state(state);
                rc = psci_plat_pm_ops->affinst_on_finish(read_mpidr_el1(),
                                                         cpu_node->level,
                                                         plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /*
         * Arch. management: Enable the data cache and manage stack memory
         */
        psci_do_pwrup_cache_maintenance();

        /*
         * All the platform specific actions for turning this cpu
         * on have completed. Perform enough arch. initialization
         * to run in the non-secure address space.
         */
        bl31_arch_setup();

        /*
         * Call the cpu on finish handler registered by the Secure Payload
         * Dispatcher to let it do any bookkeeping. If the handler encounters
         * an error, it is expected to assert within.
         */
        if (psci_spd_pm && psci_spd_pm->svc_on_finish)
                psci_spd_pm->svc_on_finish(0);

        /*
         * Generic management: Now we just need to retrieve the
         * information that we had stashed away during the cpu_on
         * call to set this cpu on its way.
         */
        cm_prepare_el3_exit(NON_SECURE);

        /* State management: mark this cpu as on */
        psci_set_state(cpu_node, PSCI_STATE_ON);

        /* Clean caches before re-entering normal world */
        dcsw_op_louis(DCCSW);

        rc = PSCI_E_SUCCESS;
        return rc;
}

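/* Power on finisher for affinity level 1, i.e. a cluster */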
static unsigned int psci_afflvl1_on_finish(aff_map_node_t *cluster_node)
{
        unsigned int plat_state, rc = PSCI_E_SUCCESS;

        assert(cluster_node->level == MPIDR_AFFLVL1);

        /*
         * Plat. management: Perform the platform specific actions
         * as per the old state of the cluster e.g. enabling
         * coherency at the interconnect depends upon the state with
         * which this cluster was powered up. If anything goes wrong
         * then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_on_finish) {

                /* Get the physical state of this cluster */
                plat_state = psci_get_phys_state(cluster_node);
                rc = psci_plat_pm_ops->affinst_on_finish(read_mpidr_el1(),
                                                         cluster_node->level,
                                                         plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /* State management: Increment the cluster reference count */
        psci_set_state(cluster_node, PSCI_STATE_ON);

        return rc;
}

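/* Power on finisher for affinity level 2, i.e. the system */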
static unsigned int psci_afflvl2_on_finish(aff_map_node_t *system_node)
{
        unsigned int plat_state, rc = PSCI_E_SUCCESS;

        /* Cannot go beyond this affinity level */
        assert(system_node->level == MPIDR_AFFLVL2);

        /*
         * Currently, there are no architectural actions to perform
         * at the system level.
         */

        /*
         * Plat. management: Perform the platform specific actions
         * as per the old state of the system e.g. enabling
         * coherency at the interconnect depends upon the state with
         * which the system was powered up. If anything goes wrong
         * then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_on_finish) {

                /* Get the physical state of the system */
                plat_state = psci_get_phys_state(system_node);
                rc = psci_plat_pm_ops->affinst_on_finish(read_mpidr_el1(),
                                                         system_node->level,
                                                         plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /* State management: Increment the system reference count */
        psci_set_state(system_node, PSCI_STATE_ON);

        return rc;
}

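/* Power on finishers for each affinity level, accessible through indexing */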
const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = {
        psci_afflvl0_on_finish,
        psci_afflvl1_on_finish,
        psci_afflvl2_on_finish,
};