/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <debug.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <psci.h>
#include <psci_private.h>
#include <context_mgmt.h>

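/*
 * Type of the handler invoked for an affinity level while suspending. The
 * arguments are, in order: the target cpu's mpidr, the affinity instance
 * node, the non-secure entrypoint, the context id and the power_state
 * parameter of the original cpu_suspend call.
 */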
typedef int (*afflvl_suspend_handler)(unsigned long,
                                       aff_map_node *,
                                       unsigned long,
                                       unsigned long,
                                       unsigned int);

/*******************************************************************************
 * This function sets the affinity level to which the current cpu will be
 * powered down during a cpu_suspend call
 ******************************************************************************/
void psci_set_suspend_afflvl(aff_map_node *node, int afflvl)
{
        /*
         * Check that nobody else is calling this function on our behalf &
         * this information is being set only in the cpu node
         */
        assert(node->mpidr == (read_mpidr() & MPIDR_AFFINITY_MASK));
        assert(node->level == MPIDR_AFFLVL0);

        /*
         * Store the affinity level we are powering down to in our context.
         * The cache flush in the suspend code will ensure that this info
         * is available immediately upon resuming.
         */
        psci_suspend_context[node->data].suspend_level = afflvl;
}

/*******************************************************************************
 * This function gets the affinity level to which the current cpu was powered
 * down during a cpu_suspend call.
 ******************************************************************************/
int psci_get_suspend_afflvl(aff_map_node *node)
{
        /* Return the target affinity level */
        return psci_suspend_context[node->data].suspend_level;
}

/*******************************************************************************
 * The next three functions implement a handler for each supported affinity
 * level which is called when that affinity level is about to be suspended.
 ******************************************************************************/
static int psci_afflvl0_suspend(unsigned long mpidr,
                                aff_map_node *cpu_node,
                                unsigned long ns_entrypoint,
                                unsigned long context_id,
                                unsigned int power_state)
{
        unsigned int index, plat_state;
        unsigned long psci_entrypoint, sctlr = read_sctlr();
        el3_state *saved_el3_state;
        int rc = PSCI_E_SUCCESS;

        /* Sanity check to safeguard against data corruption */
        assert(cpu_node->level == MPIDR_AFFLVL0);

        /* State management: mark this cpu as suspended */
        psci_set_state(cpu_node, PSCI_STATE_SUSPEND);

        /*
         * Generic management: Store the re-entry information for the
         * non-secure world
         */
        index = cpu_node->data;
        rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /*
         * Arch. management: Save the EL3 state in the 'cpu_context'
         * structure that has been allocated for this cpu, flush the
         * L1 caches and exit intra-cluster coherency et al
         */
        cm_el3_sysregs_context_save(NON_SECURE);
        rc = PSCI_E_SUCCESS;

        /*
         * Flush the EL3 state to PoC since it will be accessed after a
         * reset with the caches turned off
         */
        saved_el3_state = get_el3state_ctx(cm_get_context(mpidr, NON_SECURE));
        flush_dcache_range((uint64_t) saved_el3_state, sizeof(*saved_el3_state));

        /* Set the secure world (EL3) re-entry point after BL1 */
        psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;

        /*
         * Arch. management. Perform the necessary steps to flush all
         * cpu caches.
         *
         * TODO: This power down sequence varies across cpus so it needs to be
         * abstracted out on the basis of the MIDR like in cpu_reset_handler().
         * Do the bare minimum for the time being. Fix this before porting to
         * Cortex models.
         */
        sctlr &= ~SCTLR_C_BIT;
        write_sctlr(sctlr);

        /*
         * CAUTION: This flush to the level of unification makes an assumption
         * about the cache hierarchy at affinity level 0 (cpu) in the platform.
         * Ideally the platform should tell psci which levels to flush to exit
         * coherency.
         */
        dcsw_op_louis(DCCISW);

        /*
         * Plat. management: Allow the platform to perform the
         * necessary actions to turn off this cpu e.g. set the
         * platform defined mailbox with the psci entrypoint,
         * program the power controller etc.
         */
        if (psci_plat_pm_ops->affinst_suspend) {
                plat_state = psci_get_phys_state(cpu_node);
                rc = psci_plat_pm_ops->affinst_suspend(mpidr,
                                                       psci_entrypoint,
                                                       ns_entrypoint,
                                                       cpu_node->level,
                                                       plat_state);
        }

        return rc;
}

static int psci_afflvl1_suspend(unsigned long mpidr,
                                aff_map_node *cluster_node,
                                unsigned long ns_entrypoint,
                                unsigned long context_id,
                                unsigned int power_state)
{
        int rc = PSCI_E_SUCCESS;
        unsigned int plat_state;
        unsigned long psci_entrypoint;

        /* Sanity check the cluster level */
        assert(cluster_node->level == MPIDR_AFFLVL1);

        /* State management: Decrement the cluster reference count */
        psci_set_state(cluster_node, PSCI_STATE_SUSPEND);

        /*
         * Keep the physical state of this cluster handy to decide
         * what action needs to be taken
         */
        plat_state = psci_get_phys_state(cluster_node);

        /*
         * Arch. management: Flush all levels of caches to PoC if the
         * cluster is to be shutdown
         */
        if (plat_state == PSCI_STATE_OFF)
                dcsw_op_all(DCCISW);

        /*
         * Plat. Management: Allow the platform to do its cluster-specific
         * bookkeeping e.g. turn off interconnect coherency, program the
         * power controller etc.
         */
        if (psci_plat_pm_ops->affinst_suspend) {

                /*
                 * Sending the psci entrypoint is currently redundant
                 * beyond affinity level 0 but one never knows what a
                 * platform might do. Also it allows us to keep the
                 * platform handler prototype the same.
                 */
                psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
                rc = psci_plat_pm_ops->affinst_suspend(mpidr,
                                                       psci_entrypoint,
                                                       ns_entrypoint,
                                                       cluster_node->level,
                                                       plat_state);
        }

        return rc;
}


static int psci_afflvl2_suspend(unsigned long mpidr,
                                aff_map_node *system_node,
                                unsigned long ns_entrypoint,
                                unsigned long context_id,
                                unsigned int power_state)
{
        int rc = PSCI_E_SUCCESS;
        unsigned int plat_state;
        unsigned long psci_entrypoint;

        /* Cannot go beyond this */
        assert(system_node->level == MPIDR_AFFLVL2);

        /* State management: Decrement the system reference count */
        psci_set_state(system_node, PSCI_STATE_SUSPEND);

        /*
         * Keep the physical state of the system handy to decide what
         * action needs to be taken
         */
        plat_state = psci_get_phys_state(system_node);

        /*
         * Plat. Management: Allow the platform to do its bookkeeping
         * at this affinity level
         */
        if (psci_plat_pm_ops->affinst_suspend) {

                /*
                 * Sending the psci entrypoint is currently redundant
                 * beyond affinity level 0 but one never knows what a
                 * platform might do. Also it allows us to keep the
                 * platform handler prototype the same.
                 */
                psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
                rc = psci_plat_pm_ops->affinst_suspend(mpidr,
                                                       psci_entrypoint,
                                                       ns_entrypoint,
                                                       system_node->level,
                                                       plat_state);
        }

        return rc;
}

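/* Suspend handlers, indexed by affinity level (MPIDR_AFFLVL0 - MPIDR_AFFLVL2) */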
static const afflvl_suspend_handler psci_afflvl_suspend_handlers[] = {
        psci_afflvl0_suspend,
        psci_afflvl1_suspend,
        psci_afflvl2_suspend,
};

/*******************************************************************************
 * This function takes an array of pointers to affinity instance nodes in the
 * topology tree and calls the suspend handler for the corresponding affinity
 * levels
 ******************************************************************************/
static int psci_call_suspend_handlers(mpidr_aff_map_nodes mpidr_nodes,
                                      int start_afflvl,
                                      int end_afflvl,
                                      unsigned long mpidr,
                                      unsigned long entrypoint,
                                      unsigned long context_id,
                                      unsigned int power_state)
{
        int rc = PSCI_E_INVALID_PARAMS, level;
        aff_map_node *node;

        for (level = start_afflvl; level <= end_afflvl; level++) {
                node = mpidr_nodes[level];
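                /* A NULL node means this affinity level is absent from the topology */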
                if (node == NULL)
                        continue;

                /*
                 * TODO: In case of an error, should there be a way
                 * of restoring what we might have torn down at
                 * lower affinity levels?
                 */
                rc = psci_afflvl_suspend_handlers[level](mpidr,
                                                         node,
                                                         entrypoint,
                                                         context_id,
                                                         power_state);
                if (rc != PSCI_E_SUCCESS)
                        break;
        }

        return rc;
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with turning the cpu off, higher affinity levels up
 * to the target affinity level will be turned off as well. It traverses
 * through all the affinity levels performing generic, architectural and
 * platform setup and state management e.g. for a cluster that is to be
 * suspended, it will call the platform specific code which will disable
 * coherency at the interconnect level if the cpu is the last in the cluster.
 * For a cpu it could mean programming the power controller etc.
 *
 * The state of all the relevant affinity levels is changed prior to calling
 * the affinity level specific handlers as their actions would depend upon the
 * state the affinity level is about to enter.
 *
 * The affinity level specific handlers are called in ascending order i.e. from
 * the lowest to the highest affinity level implemented by the platform because
 * to turn off affinity level X it is necessary to turn off affinity level
 * X - 1 first.
 *
 * CAUTION: This function is called with coherent stacks so that coherency can
 * be turned off and caches can be flushed safely.
 ******************************************************************************/
int psci_afflvl_suspend(unsigned long mpidr,
                        unsigned long entrypoint,
                        unsigned long context_id,
                        unsigned int power_state,
                        int start_afflvl,
                        int end_afflvl)
{
        int rc = PSCI_E_SUCCESS;
        mpidr_aff_map_nodes mpidr_nodes;

        mpidr &= MPIDR_AFFINITY_MASK;

        /*
         * Collect the pointers to the nodes in the topology tree for
         * each affinity instance in the mpidr. If this function does
         * not return successfully then either the mpidr or the affinity
         * levels are incorrect.
         */
        rc = psci_get_aff_map_nodes(mpidr,
                                    start_afflvl,
                                    end_afflvl,
                                    mpidr_nodes);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /*
         * This function acquires the lock corresponding to each affinity
         * level so that by the time all locks are taken, the system topology
         * has been snapshotted and state management can be done safely.
         */
        psci_acquire_afflvl_locks(mpidr,
                                  start_afflvl,
                                  end_afflvl,
                                  mpidr_nodes);

        /* Save the affinity level to which this cpu can be powered down */
        psci_set_suspend_afflvl(mpidr_nodes[MPIDR_AFFLVL0], end_afflvl);

        /* Perform generic, architecture and platform specific handling */
        rc = psci_call_suspend_handlers(mpidr_nodes,
                                        start_afflvl,
                                        end_afflvl,
                                        mpidr,
                                        entrypoint,
                                        context_id,
                                        power_state);

        /*
         * Release the locks corresponding to each affinity level in the
         * reverse order to which they were acquired.
         */
        psci_release_afflvl_locks(mpidr,
                                  start_afflvl,
                                  end_afflvl,
                                  mpidr_nodes);

        return rc;
}

/*******************************************************************************
 * The following functions finish an earlier affinity suspend request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
                                                aff_map_node *cpu_node)
{
        unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS;

        assert(cpu_node->level == MPIDR_AFFLVL0);

        /* Ensure we have been woken up from a suspended state */
        state = psci_get_state(cpu_node);
        assert(state == PSCI_STATE_SUSPEND);

        /*
         * Plat. management: Perform the platform specific actions
         * before we change the state of the cpu e.g. enabling the
         * gic or zeroing the mailbox register. If anything goes
         * wrong then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_suspend_finish) {

                /* Get the physical state of this cpu */
                plat_state = get_phys_state(state);
                rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
                                                              cpu_node->level,
                                                              plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /* Get the index for restoring the re-entry information */
        index = cpu_node->data;

        /*
         * Arch. management: Restore the stashed EL3 architectural
         * context from the 'cpu_context' structure for this cpu.
         */
        cm_el3_sysregs_context_restore(NON_SECURE);
        rc = PSCI_E_SUCCESS;

        /*
         * Generic management: Now we just need to retrieve the
         * information that we had stashed away during the suspend
         * call to set this cpu on its way.
         */
        psci_get_ns_entry_info(index);

        /* State management: mark this cpu as on */
        psci_set_state(cpu_node, PSCI_STATE_ON);

        /* Clean caches before re-entering normal world */
        dcsw_op_louis(DCCSW);

        return rc;
}

static unsigned int psci_afflvl1_suspend_finish(unsigned long mpidr,
                                                aff_map_node *cluster_node)
{
        unsigned int plat_state, rc = PSCI_E_SUCCESS;

        assert(cluster_node->level == MPIDR_AFFLVL1);

        /*
         * Plat. management: Perform the platform specific actions
         * as per the old state of the cluster e.g. enabling
         * coherency at the interconnect depends upon the state with
         * which this cluster was powered up. If anything goes wrong
         * then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_suspend_finish) {

                /* Get the physical state of this cluster */
                plat_state = psci_get_phys_state(cluster_node);
                rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
                                                              cluster_node->level,
                                                              plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /* State management: Increment the cluster reference count */
        psci_set_state(cluster_node, PSCI_STATE_ON);

        return rc;
}


static unsigned int psci_afflvl2_suspend_finish(unsigned long mpidr,
                                                aff_map_node *system_node)
{
        unsigned int plat_state, rc = PSCI_E_SUCCESS;

        /* Cannot go beyond this affinity level */
        assert(system_node->level == MPIDR_AFFLVL2);

        /*
         * Currently, there are no architectural actions to perform
         * at the system level.
         */

        /*
         * Plat. management: Perform the platform specific actions
         * as per the old state of the system e.g. enabling
         * coherency at the interconnect depends upon the state with
         * which the system was powered up. If anything goes wrong
         * then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_suspend_finish) {

                /* Get the physical state of the system */
                plat_state = psci_get_phys_state(system_node);
                rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
                                                              system_node->level,
                                                              plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /* State management: Increment the system reference count */
        psci_set_state(system_node, PSCI_STATE_ON);

        return rc;
}

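/* Suspend finishers, indexed by affinity level (MPIDR_AFFLVL0 - MPIDR_AFFLVL2) */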
const afflvl_power_on_finisher psci_afflvl_suspend_finishers[] = {
        psci_afflvl0_suspend_finish,
        psci_afflvl1_suspend_finish,
        psci_afflvl2_suspend_finish,
};