blob: 214db7808278b555397136207950b179541f0c94 [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dan Handleye83b0ca2014-01-14 18:17:09 +00002 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <stdio.h>
32#include <string.h>
33#include <assert.h>
34#include <arch_helpers.h>
35#include <console.h>
36#include <platform.h>
37#include <psci.h>
38#include <psci_private.h>
Jeenu Viswambharancaa84932014-02-06 10:36:15 +000039#include <context_mgmt.h>
Achin Guptac8afc782013-11-25 18:45:02 +000040#include <runtime_svc.h>
James Morrissey40a6f642014-02-10 14:24:36 +000041#include "debug.h"
Achin Gupta4f6ad662013-10-25 09:08:21 +010042
/*******************************************************************************
 * Arrays that contain the information needed to resume a cpu's execution when
 * it is woken out of the suspend or off states. 'psci_ns_einfo_idx' keeps
 * track of the next free index in the 'psci_ns_entry_info' &
 * 'psci_suspend_context' arrays. Each cpu is allocated a single entry in each
 * array during startup.
 ******************************************************************************/
suspend_context psci_suspend_context[PSCI_NUM_AFFS];
ns_entry_info psci_ns_entry_info[PSCI_NUM_AFFS];
/* Next free slot in the two per-cpu arrays above */
unsigned int psci_ns_einfo_idx;

/*******************************************************************************
 * Grand array that holds the platform's topology information for state
 * management of affinity instances. Each node (aff_map_node) in the array
 * corresponds to an affinity instance e.g. cluster, cpu within an mpidr.
 * Placed in coherent memory so it can be accessed safely with caches/MMU off.
 ******************************************************************************/
aff_map_node psci_aff_map[PSCI_NUM_AFFS]
__attribute__ ((section("tzfw_coherent_mem")));

/*******************************************************************************
 * In a system, a certain number of affinity instances are present at an
 * affinity level. The cumulative number of instances across all levels are
 * stored in 'psci_aff_map'. The topology tree has been flattened into this
 * array. To retrieve nodes, information about the extents of each affinity
 * level i.e. start index and end index needs to be present.
 * 'psci_aff_limits' stores this information.
 ******************************************************************************/
aff_limits_node psci_aff_limits[MPIDR_MAX_AFFLVL + 1];

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
plat_pm_ops *psci_plat_pm_ops;
75
76/*******************************************************************************
Achin Guptaa45e3972013-12-05 15:10:48 +000077 * Routine to return the maximum affinity level to traverse to after a cpu has
78 * been physically powered up. It is expected to be called immediately after
79 * reset from assembler code. It has to find its 'aff_map_node' instead of
80 * getting it as an argument.
81 * TODO: Calling psci_get_aff_map_node() with the MMU disabled is slow. Add
82 * support to allow faster access to the target affinity level.
83 ******************************************************************************/
84int get_power_on_target_afflvl(unsigned long mpidr)
85{
86 aff_map_node *node;
87 unsigned int state;
88
89 /* Retrieve our node from the topology tree */
Jeenu Viswambharancaa84932014-02-06 10:36:15 +000090 node = psci_get_aff_map_node(mpidr & MPIDR_AFFINITY_MASK,
91 MPIDR_AFFLVL0);
Achin Guptaa45e3972013-12-05 15:10:48 +000092 assert(node);
93
94 /*
95 * Return the maximum supported affinity level if this cpu was off.
96 * Call the handler in the suspend code if this cpu had been suspended.
97 * Any other state is invalid.
98 */
Achin Gupta75f73672013-12-05 16:33:10 +000099 state = psci_get_state(node);
Achin Guptaa45e3972013-12-05 15:10:48 +0000100 if (state == PSCI_STATE_ON_PENDING)
101 return get_max_afflvl();
102
103 if (state == PSCI_STATE_SUSPEND)
104 return psci_get_suspend_afflvl(node);
105
106 return PSCI_E_INVALID_PARAMS;
107}
108
109/*******************************************************************************
Achin Gupta4f6ad662013-10-25 09:08:21 +0100110 * Simple routine to retrieve the maximum affinity level supported by the
111 * platform and check that it makes sense.
112 ******************************************************************************/
113int get_max_afflvl()
114{
115 int aff_lvl;
116
117 aff_lvl = plat_get_max_afflvl();
118 assert(aff_lvl <= MPIDR_MAX_AFFLVL && aff_lvl >= MPIDR_AFFLVL0);
119
120 return aff_lvl;
121}
122
123/*******************************************************************************
124 * Simple routine to set the id of an affinity instance at a given level in the
125 * mpidr.
126 ******************************************************************************/
127unsigned long mpidr_set_aff_inst(unsigned long mpidr,
128 unsigned char aff_inst,
129 int aff_lvl)
130{
131 unsigned long aff_shift;
132
133 assert(aff_lvl <= MPIDR_AFFLVL3);
134
135 /*
136 * Decide the number of bits to shift by depending upon
137 * the affinity level
138 */
139 aff_shift = get_afflvl_shift(aff_lvl);
140
141 /* Clear the existing affinity instance & set the new one*/
142 mpidr &= ~(MPIDR_AFFLVL_MASK << aff_shift);
143 mpidr |= aff_inst << aff_shift;
144
145 return mpidr;
146}
147
148/*******************************************************************************
Achin Gupta0959db52013-12-02 17:33:04 +0000149 * This function sanity checks a range of affinity levels.
150 ******************************************************************************/
151int psci_check_afflvl_range(int start_afflvl, int end_afflvl)
152{
153 /* Sanity check the parameters passed */
154 if (end_afflvl > MPIDR_MAX_AFFLVL)
155 return PSCI_E_INVALID_PARAMS;
156
157 if (start_afflvl < MPIDR_AFFLVL0)
158 return PSCI_E_INVALID_PARAMS;
159
160 if (end_afflvl < start_afflvl)
161 return PSCI_E_INVALID_PARAMS;
162
163 return PSCI_E_SUCCESS;
164}
165
166/*******************************************************************************
167 * This function is passed an array of pointers to affinity level nodes in the
168 * topology tree for an mpidr. It picks up locks for each affinity level bottom
169 * up in the range specified.
170 ******************************************************************************/
171void psci_acquire_afflvl_locks(unsigned long mpidr,
172 int start_afflvl,
173 int end_afflvl,
174 mpidr_aff_map_nodes mpidr_nodes)
175{
176 int level;
177
178 for (level = start_afflvl; level <= end_afflvl; level++) {
179 if (mpidr_nodes[level] == NULL)
180 continue;
181 bakery_lock_get(mpidr, &mpidr_nodes[level]->lock);
182 }
183}
184
185/*******************************************************************************
186 * This function is passed an array of pointers to affinity level nodes in the
187 * topology tree for an mpidr. It releases the lock for each affinity level top
188 * down in the range specified.
189 ******************************************************************************/
190void psci_release_afflvl_locks(unsigned long mpidr,
191 int start_afflvl,
192 int end_afflvl,
193 mpidr_aff_map_nodes mpidr_nodes)
194{
195 int level;
196
197 for (level = end_afflvl; level >= start_afflvl; level--) {
198 if (mpidr_nodes[level] == NULL)
199 continue;
200 bakery_lock_release(mpidr, &mpidr_nodes[level]->lock);
201 }
202}
203
204/*******************************************************************************
Achin Gupta4f6ad662013-10-25 09:08:21 +0100205 * Simple routine to determine whether an affinity instance at a given level
206 * in an mpidr exists or not.
207 ******************************************************************************/
208int psci_validate_mpidr(unsigned long mpidr, int level)
209{
210 aff_map_node *node;
211
212 node = psci_get_aff_map_node(mpidr, level);
213 if (node && (node->state & PSCI_AFF_PRESENT))
214 return PSCI_E_SUCCESS;
215 else
216 return PSCI_E_INVALID_PARAMS;
217}
218
219/*******************************************************************************
Achin Gupta4f6ad662013-10-25 09:08:21 +0100220 * This function retrieves all the stashed information needed to correctly
221 * resume a cpu's execution in the non-secure state after it has been physically
222 * powered on i.e. turned ON or resumed from SUSPEND
223 ******************************************************************************/
Achin Guptac8afc782013-11-25 18:45:02 +0000224void psci_get_ns_entry_info(unsigned int index)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100225{
226 unsigned long sctlr = 0, scr, el_status, id_aa64pfr0;
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000227 uint64_t mpidr = read_mpidr();
228 cpu_context *ns_entry_context;
229 gp_regs *ns_entry_gpregs;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100230
231 scr = read_scr();
232
Achin Gupta4f6ad662013-10-25 09:08:21 +0100233 /* Find out which EL we are going to */
234 id_aa64pfr0 = read_id_aa64pfr0_el1();
235 el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
236 ID_AA64PFR0_ELX_MASK;
237
238 /* Restore endianess */
239 if (psci_ns_entry_info[index].sctlr & SCTLR_EE_BIT)
240 sctlr |= SCTLR_EE_BIT;
241 else
242 sctlr &= ~SCTLR_EE_BIT;
243
244 /* Turn off MMU and Caching */
245 sctlr &= ~(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_M_BIT);
246
247 /* Set the register width */
248 if (psci_ns_entry_info[index].scr & SCR_RW_BIT)
249 scr |= SCR_RW_BIT;
250 else
251 scr &= ~SCR_RW_BIT;
252
253 scr |= SCR_NS_BIT;
254
255 if (el_status)
256 write_sctlr_el2(sctlr);
257 else
258 write_sctlr_el1(sctlr);
259
260 /* Fulfill the cpu_on entry reqs. as per the psci spec */
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000261 ns_entry_context = (cpu_context *) cm_get_context(mpidr, NON_SECURE);
262 assert(ns_entry_context);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100263
Achin Guptac8afc782013-11-25 18:45:02 +0000264 /*
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000265 * Setup general purpose registers to return the context id and
266 * prevent leakage of secure information into the normal world.
Achin Guptac8afc782013-11-25 18:45:02 +0000267 */
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000268 ns_entry_gpregs = get_gpregs_ctx(ns_entry_context);
269 write_ctx_reg(ns_entry_gpregs,
270 CTX_GPREG_X0,
271 psci_ns_entry_info[index].context_id);
272
273 /*
274 * Tell the context management library to setup EL3 system registers to
275 * be able to ERET into the ns state, and SP_EL3 points to the right
276 * context to exit from EL3 correctly.
277 */
278 cm_set_el3_eret_context(NON_SECURE,
279 psci_ns_entry_info[index].eret_info.entrypoint,
280 psci_ns_entry_info[index].eret_info.spsr,
281 scr);
282
283 cm_set_next_eret_context(NON_SECURE);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100284}
285
/*******************************************************************************
 * This function retrieves and stashes all the information needed to correctly
 * resume a cpu's execution in the non-secure state after it has been physically
 * powered on i.e. turned ON or resumed from SUSPEND. This is done prior to
 * turning it on or before suspending it.
 *
 * index      - this cpu's slot in the 'psci_ns_entry_info' array.
 * entrypoint - non-secure address execution resumes at.
 * context_id - opaque value handed back to the caller in x0/r0.
 *
 * Returns PSCI_E_SUCCESS, or PSCI_E_INVALID_PARAMS if a Thumb (bit 0 set)
 * entry point is supplied for an aarch64 EL.
 ******************************************************************************/
int psci_set_ns_entry_info(unsigned int index,
			   unsigned long entrypoint,
			   unsigned long context_id)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int rw, mode, ee, spsr = 0;
	unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1(), scr = read_scr();
	unsigned long el_status;

	/* Figure out what mode do we enter the non-secure world in */
	el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
		ID_AA64PFR0_ELX_MASK;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	rw = scr & SCR_RW_BIT;
	if (rw) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if (entrypoint & 0x1)
			return PSCI_E_INVALID_PARAMS;

		/* Enter EL2 only if it exists and hyp calls are enabled */
		if (el_status && (scr & SCR_HCE_BIT)) {
			mode = MODE_EL2;
			ee = read_sctlr_el2() & SCTLR_EE_BIT;
		} else {
			mode = MODE_EL1;
			ee = read_sctlr_el1() & SCTLR_EE_BIT;
		}

		/* Mask all asynchronous exceptions at entry */
		spsr = DAIF_DBG_BIT | DAIF_ABT_BIT;
		spsr |= DAIF_IRQ_BIT | DAIF_FIQ_BIT;
		spsr <<= PSR_DAIF_SHIFT;
		spsr |= make_spsr(mode, MODE_SP_ELX, !rw);

		psci_ns_entry_info[index].sctlr |= ee;
		psci_ns_entry_info[index].scr |= SCR_RW_BIT;
	} else {

		/* Check whether aarch32 has to be entered in Thumb mode */
		if (entrypoint & 0x1)
			spsr = SPSR32_T_BIT;

		if (el_status && (scr & SCR_HCE_BIT)) {
			mode = AARCH32_MODE_HYP;
			ee = read_sctlr_el2() & SCTLR_EE_BIT;
		} else {
			mode = AARCH32_MODE_SVC;
			ee = read_sctlr_el1() & SCTLR_EE_BIT;
		}

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		spsr |= DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
		spsr <<= PSR_DAIF_SHIFT;
		if (ee)
			spsr |= SPSR32_EE_BIT;
		spsr |= mode;

		/* Ensure that the CSPR.E and SCTLR.EE bits match */
		psci_ns_entry_info[index].sctlr |= ee;
		psci_ns_entry_info[index].scr &= ~SCR_RW_BIT;
	}

	/* Stash everything needed by psci_get_ns_entry_info() at wakeup */
	psci_ns_entry_info[index].eret_info.entrypoint = entrypoint;
	psci_ns_entry_info[index].eret_info.spsr = spsr;
	psci_ns_entry_info[index].context_id = context_id;

	return rc;
}
369
370/*******************************************************************************
Achin Gupta75f73672013-12-05 16:33:10 +0000371 * This function takes a pointer to an affinity node in the topology tree and
372 * returns its state. State of a non-leaf node needs to be calculated.
373 ******************************************************************************/
374unsigned short psci_get_state(aff_map_node *node)
375{
376 assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);
377
378 /* A cpu node just contains the state which can be directly returned */
379 if (node->level == MPIDR_AFFLVL0)
380 return (node->state >> PSCI_STATE_SHIFT) & PSCI_STATE_MASK;
381
382 /*
383 * For an affinity level higher than a cpu, the state has to be
384 * calculated. It depends upon the value of the reference count
385 * which is managed by each node at the next lower affinity level
386 * e.g. for a cluster, each cpu increments/decrements the reference
387 * count. If the reference count is 0 then the affinity level is
388 * OFF else ON.
389 */
390 if (node->ref_count)
391 return PSCI_STATE_ON;
392 else
393 return PSCI_STATE_OFF;
394}
395
396/*******************************************************************************
397 * This function takes a pointer to an affinity node in the topology tree and
398 * a target state. State of a non-leaf node needs to be converted to a reference
399 * count. State of a leaf node can be set directly.
400 ******************************************************************************/
401void psci_set_state(aff_map_node *node, unsigned short state)
402{
403 assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);
404
405 /*
406 * For an affinity level higher than a cpu, the state is used
407 * to decide whether the reference count is incremented or
408 * decremented. Entry into the ON_PENDING state does not have
409 * effect.
410 */
411 if (node->level > MPIDR_AFFLVL0) {
412 switch (state) {
413 case PSCI_STATE_ON:
414 node->ref_count++;
415 break;
416 case PSCI_STATE_OFF:
417 case PSCI_STATE_SUSPEND:
418 node->ref_count--;
419 break;
420 case PSCI_STATE_ON_PENDING:
421 /*
422 * An affinity level higher than a cpu will not undergo
423 * a state change when it is about to be turned on
424 */
425 return;
426 default:
427 assert(0);
428 }
429 } else {
430 node->state &= ~(PSCI_STATE_MASK << PSCI_STATE_SHIFT);
431 node->state |= (state & PSCI_STATE_MASK) << PSCI_STATE_SHIFT;
432 }
433}
434
435/*******************************************************************************
Achin Gupta4f6ad662013-10-25 09:08:21 +0100436 * An affinity level could be on, on_pending, suspended or off. These are the
Achin Gupta3140a9e2013-12-02 16:23:12 +0000437 * logical states it can be in. Physically either it is off or on. When it is in
438 * the state on_pending then it is about to be turned on. It is not possible to
Achin Gupta4f6ad662013-10-25 09:08:21 +0100439 * tell whether that's actually happenned or not. So we err on the side of
440 * caution & treat the affinity level as being turned off.
441 ******************************************************************************/
Achin Gupta75f73672013-12-05 16:33:10 +0000442unsigned short psci_get_phys_state(aff_map_node *node)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100443{
Achin Gupta75f73672013-12-05 16:33:10 +0000444 unsigned int state;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100445
Achin Gupta75f73672013-12-05 16:33:10 +0000446 state = psci_get_state(node);
447 return get_phys_state(state);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100448}
449
450/*******************************************************************************
Achin Gupta0959db52013-12-02 17:33:04 +0000451 * This function takes an array of pointers to affinity instance nodes in the
452 * topology tree and calls the physical power on handler for the corresponding
453 * affinity levels
454 ******************************************************************************/
455static int psci_call_power_on_handlers(mpidr_aff_map_nodes mpidr_nodes,
456 int start_afflvl,
457 int end_afflvl,
458 afflvl_power_on_finisher *pon_handlers,
459 unsigned long mpidr)
460{
461 int rc = PSCI_E_INVALID_PARAMS, level;
462 aff_map_node *node;
463
464 for (level = end_afflvl; level >= start_afflvl; level--) {
465 node = mpidr_nodes[level];
466 if (node == NULL)
467 continue;
468
469 /*
470 * If we run into any trouble while powering up an
471 * affinity instance, then there is no recovery path
472 * so simply return an error and let the caller take
473 * care of the situation.
474 */
475 rc = pon_handlers[level](mpidr, node);
476 if (rc != PSCI_E_SUCCESS)
477 break;
478 }
479
480 return rc;
481}
482
483/*******************************************************************************
Achin Gupta4f6ad662013-10-25 09:08:21 +0100484 * Generic handler which is called when a cpu is physically powered on. It
Achin Gupta0959db52013-12-02 17:33:04 +0000485 * traverses through all the affinity levels performing generic, architectural,
Achin Gupta4f6ad662013-10-25 09:08:21 +0100486 * platform setup and state management e.g. for a cluster that's been powered
487 * on, it will call the platform specific code which will enable coherency at
488 * the interconnect level. For a cpu it could mean turning on the MMU etc.
489 *
Achin Gupta0959db52013-12-02 17:33:04 +0000490 * The state of all the relevant affinity levels is changed after calling the
491 * affinity level specific handlers as their actions would depend upon the state
492 * the affinity level is exiting from.
493 *
494 * The affinity level specific handlers are called in descending order i.e. from
495 * the highest to the lowest affinity level implemented by the platform because
496 * to turn on affinity level X it is neccesary to turn on affinity level X + 1
497 * first.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100498 *
499 * CAUTION: This function is called with coherent stacks so that coherency and
500 * the mmu can be turned on safely.
501 ******************************************************************************/
Achin Gupta0959db52013-12-02 17:33:04 +0000502void psci_afflvl_power_on_finish(unsigned long mpidr,
503 int start_afflvl,
504 int end_afflvl,
505 afflvl_power_on_finisher *pon_handlers)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100506{
Achin Gupta0959db52013-12-02 17:33:04 +0000507 mpidr_aff_map_nodes mpidr_nodes;
508 int rc;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100509
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000510 mpidr &= MPIDR_AFFINITY_MASK;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100511
512 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000513 * Collect the pointers to the nodes in the topology tree for
514 * each affinity instance in the mpidr. If this function does
515 * not return successfully then either the mpidr or the affinity
516 * levels are incorrect. Either case is an irrecoverable error.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100517 */
Achin Gupta0959db52013-12-02 17:33:04 +0000518 rc = psci_get_aff_map_nodes(mpidr,
519 start_afflvl,
520 end_afflvl,
521 mpidr_nodes);
James Morrissey40a6f642014-02-10 14:24:36 +0000522 if (rc != PSCI_E_SUCCESS)
523 panic();
Achin Gupta4f6ad662013-10-25 09:08:21 +0100524
525 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000526 * This function acquires the lock corresponding to each affinity
527 * level so that by the time all locks are taken, the system topology
528 * is snapshot and state management can be done safely.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100529 */
Achin Gupta0959db52013-12-02 17:33:04 +0000530 psci_acquire_afflvl_locks(mpidr,
531 start_afflvl,
532 end_afflvl,
533 mpidr_nodes);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100534
535 /* Perform generic, architecture and platform specific handling */
Achin Gupta0959db52013-12-02 17:33:04 +0000536 rc = psci_call_power_on_handlers(mpidr_nodes,
537 start_afflvl,
538 end_afflvl,
539 pon_handlers,
540 mpidr);
James Morrissey40a6f642014-02-10 14:24:36 +0000541 if (rc != PSCI_E_SUCCESS)
542 panic();
Achin Gupta4f6ad662013-10-25 09:08:21 +0100543
544 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000545 * This loop releases the lock corresponding to each affinity level
546 * in the reverse order to which they were acquired.
547 */
548 psci_release_afflvl_locks(mpidr,
549 start_afflvl,
550 end_afflvl,
551 mpidr_nodes);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100552}