/*
 * Copyright (c) 2013, ARM Limited. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <psci.h>
#include <psci_private.h>

/*******************************************************************************
 * Arrays that hold the information needed to resume a cpu's execution when it
 * is woken out of the suspend or off states. 'psci_ns_einfo_idx' keeps track
 * of the next free index in the 'psci_ns_entry_info' & 'psci_secure_context'
 * arrays. Each cpu is allocated a single entry in each array during startup.
 ******************************************************************************/
secure_context psci_secure_context[PSCI_NUM_AFFS];
ns_entry_info psci_ns_entry_info[PSCI_NUM_AFFS];
unsigned int psci_ns_einfo_idx;

/*******************************************************************************
 * Grand array that holds the platform's topology information for state
 * management of affinity instances. Each node (aff_map_node) in the array
 * corresponds to an affinity instance within an mpidr e.g. a cluster or a cpu.
 ******************************************************************************/
aff_map_node psci_aff_map[PSCI_NUM_AFFS]
__attribute__ ((section("tzfw_coherent_mem")));
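/*
 * Note: placing this array in the 'tzfw_coherent_mem' section is presumably
 * done because it is read and written by cpus entering and leaving low power
 * states, i.e. at times when their data caches and/or MMU may be off, so the
 * data must live in memory that remains coherent without caching.
 */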

/*******************************************************************************
 * In a system, a certain number of affinity instances are present at an
 * affinity level. The instances across all the levels are stored cumulatively
 * in 'psci_aff_map' i.e. the topology tree has been flattened into this
 * array. To retrieve nodes, information about the extents of each affinity
 * level i.e. the start index and end index, needs to be present.
 * 'psci_aff_limits' stores this information.
 ******************************************************************************/
aff_limits_node psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
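/*
 * For illustration only (the exact indices depend upon how the platform
 * topology gets flattened): on a single-cluster system with four cpus, the
 * entry for the cluster level would record an extent covering the lone
 * cluster node in 'psci_aff_map', while the entry for affinity level 0 would
 * record the extent covering the four cpu nodes.
 */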

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
plat_pm_ops *psci_plat_pm_ops;

/*******************************************************************************
 * Simple routine to retrieve the maximum affinity level supported by the
 * platform and check that it makes sense.
 ******************************************************************************/
int get_max_afflvl(void)
{
	int aff_lvl;

	aff_lvl = plat_get_max_afflvl();
	assert(aff_lvl <= MPIDR_MAX_AFFLVL && aff_lvl >= MPIDR_AFFLVL0);

	return aff_lvl;
}

/*******************************************************************************
 * Simple routine to set the id of an affinity instance at a given level in the
 * mpidr.
 ******************************************************************************/
unsigned long mpidr_set_aff_inst(unsigned long mpidr,
				 unsigned char aff_inst,
				 int aff_lvl)
{
	unsigned long aff_shift;

	assert(aff_lvl <= MPIDR_AFFLVL3);

	/*
	 * Decide the number of bits to shift by depending upon
	 * the affinity level
	 */
	aff_shift = get_afflvl_shift(aff_lvl);

	/*
	 * Clear the existing affinity instance & set the new one. The cast
	 * widens 'aff_inst' before the shift, which can be up to 32 bits for
	 * affinity level 3.
	 */
	mpidr &= ~(MPIDR_AFFLVL_MASK << aff_shift);
	mpidr |= (unsigned long) aff_inst << aff_shift;

	return mpidr;
}
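/*
 * For example, given that the architecture places affinity level 0 in mpidr
 * bits[7:0] and level 1 in bits[15:8]:
 *   mpidr_set_aff_inst(0x0101, 3, MPIDR_AFFLVL0) returns 0x0103
 * i.e. cpu 3 within cluster 1.
 */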

/*******************************************************************************
 * Simple routine to determine whether an affinity instance at a given level
 * in an mpidr exists or not.
 ******************************************************************************/
int psci_validate_mpidr(unsigned long mpidr, int level)
{
	aff_map_node *node;

	node = psci_get_aff_map_node(mpidr, level);
	if (node && (node->state & PSCI_AFF_PRESENT))
		return PSCI_E_SUCCESS;
	else
		return PSCI_E_INVALID_PARAMS;
}

/*******************************************************************************
 * Simple routine to determine the first present affinity level instance
 * between the start and end affinity levels. This helps to skip handling of
 * absent affinity levels while performing psci operations.
 * The start level can be greater or less than the end level depending upon
 * whether this routine is expected to search top down or bottom up.
 ******************************************************************************/
int psci_get_first_present_afflvl(unsigned long mpidr,
				  int start_afflvl,
				  int end_afflvl,
				  aff_map_node **node)
{
	int level;

	/* Check whether we have to search up or down */
	if (start_afflvl <= end_afflvl) {
		for (level = start_afflvl; level <= end_afflvl; level++) {
			*node = psci_get_aff_map_node(mpidr, level);
			if (*node && ((*node)->state & PSCI_AFF_PRESENT))
				break;
		}
	} else {
		for (level = start_afflvl; level >= end_afflvl; level--) {
			*node = psci_get_aff_map_node(mpidr, level);
			if (*node && ((*node)->state & PSCI_AFF_PRESENT))
				break;
		}
	}

	return level;
}
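/*
 * Note: if no present instance is found in the requested range, the loop runs
 * to completion and '*node' is left holding the result of the last lookup,
 * which may be NULL or may point to an absent instance. Callers are therefore
 * expected to validate the returned node before using it.
 */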

/*******************************************************************************
 * Recursively change the affinity state between the current and target
 * affinity levels. The target state matters only if we are starting from
 * affinity level 0 i.e. a cpu; otherwise the state depends upon the state of
 * the lower affinity levels.
 ******************************************************************************/
int psci_change_state(unsigned long mpidr,
		      int cur_afflvl,
		      int tgt_afflvl,
		      unsigned int tgt_state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int state;
	aff_map_node *aff_node;

	/* Sanity check the affinity levels */
	assert(tgt_afflvl >= cur_afflvl);

	aff_node = psci_get_aff_map_node(mpidr, cur_afflvl);
	assert(aff_node);

	/* TODO: Check whether the affinity level is present or absent */

	if (cur_afflvl == MPIDR_AFFLVL0) {
		psci_set_state(aff_node->state, tgt_state);
	} else {
		state = psci_calculate_affinity_state(aff_node);
		psci_set_state(aff_node->state, state);
	}

	if (cur_afflvl != tgt_afflvl)
		psci_change_state(mpidr, cur_afflvl + 1, tgt_afflvl, tgt_state);

	return rc;
}
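/*
 * For illustration: marking a cpu as turned off while keeping its cluster
 * state consistent could be done with
 *   psci_change_state(mpidr, MPIDR_AFFLVL0, MPIDR_AFFLVL1, PSCI_STATE_OFF);
 * which assigns PSCI_STATE_OFF to the cpu node and then recalculates the
 * cluster node's state from the states of all the cpus within it.
 */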

/*******************************************************************************
 * This routine does the heavy lifting for psci_change_state(). It examines the
 * state of each affinity instance at the next lower affinity level and decides
 * its final state accordingly. If a lower affinity instance is ON then the
 * higher affinity instance is ON. If all the lower affinity instances are OFF
 * then the higher affinity instance is OFF. If at least one lower affinity
 * instance is SUSPENDED then the higher affinity instance is SUSPENDED. If only
 * a single lower affinity instance is ON_PENDING then the higher affinity
 * instance is ON_PENDING as well.
 ******************************************************************************/
unsigned int psci_calculate_affinity_state(aff_map_node *aff_node)
{
	int ctr;
	unsigned int aff_count, hi_aff_state;
	unsigned long tempidr;
	aff_map_node *lo_aff_node;

	/* Cannot calculate the lowest affinity state. It's simply assigned */
	assert(aff_node->level > MPIDR_AFFLVL0);

	/*
	 * Find the number of affinity instances at level X-1 e.g. the number
	 * of cpus in a cluster. The level X state depends upon the state of
	 * each instance at level X-1
	 */
	hi_aff_state = PSCI_STATE_OFF;
	aff_count = plat_get_aff_count(aff_node->level - 1, aff_node->mpidr);
	for (ctr = 0; ctr < aff_count; ctr++) {

		/*
		 * Create an mpidr for each lower affinity level (X-1). Use
		 * their states to influence the higher affinity state (X).
		 */
		tempidr = mpidr_set_aff_inst(aff_node->mpidr,
					     ctr,
					     aff_node->level - 1);
		lo_aff_node = psci_get_aff_map_node(tempidr,
						    aff_node->level - 1);
		assert(lo_aff_node);

		/* Continue only if the cpu exists within the cluster */
		if (!(lo_aff_node->state & PSCI_AFF_PRESENT))
			continue;

		switch (psci_get_state(lo_aff_node->state)) {

		/*
		 * If any lower affinity is on within the cluster, then
		 * the higher affinity is on.
		 */
		case PSCI_STATE_ON:
			return PSCI_STATE_ON;

		/*
		 * At least one X-1 needs to be suspended for X to be suspended
		 * but it's effectively on for the affinity_info call.
		 * SUSPEND > ON_PENDING > OFF.
		 */
		case PSCI_STATE_SUSPEND:
			hi_aff_state = PSCI_STATE_SUSPEND;
			continue;

		/*
		 * At least one X-1 needs to be on_pending & the rest off for X
		 * to be on_pending. ON_PENDING > OFF.
		 */
		case PSCI_STATE_ON_PENDING:
			if (hi_aff_state != PSCI_STATE_SUSPEND)
				hi_aff_state = PSCI_STATE_ON_PENDING;
			continue;

		/* Higher affinity is off if all lower affinities are off. */
		case PSCI_STATE_OFF:
			continue;

		default:
			assert(0);
		}
	}

	return hi_aff_state;
}
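/*
 * In effect the states carry the precedence ON > SUSPEND > ON_PENDING > OFF
 * e.g. a cluster whose cpus are in the states {OFF, SUSPEND, ON_PENDING} is
 * reported as SUSPENDED, while {OFF, ON_PENDING, OFF} yields ON_PENDING.
 */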

/*******************************************************************************
 * This function retrieves all the stashed information needed to correctly
 * resume a cpu's execution in the non-secure state after it has been physically
 * powered on i.e. turned ON or resumed from SUSPEND
 ******************************************************************************/
unsigned int psci_get_ns_entry_info(unsigned int index)
{
	unsigned long sctlr = 0, scr, el_status, id_aa64pfr0;

	scr = read_scr();

	/* Switch to the non-secure view of the registers */
	write_scr(scr | SCR_NS_BIT);

	/* Find out which EL we are going to */
	id_aa64pfr0 = read_id_aa64pfr0_el1();
	el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
		ID_AA64PFR0_ELX_MASK;

	/* Restore endianness */
	if (psci_ns_entry_info[index].sctlr & SCTLR_EE_BIT)
		sctlr |= SCTLR_EE_BIT;
	else
		sctlr &= ~SCTLR_EE_BIT;

	/* Turn off MMU and Caching */
	sctlr &= ~(SCTLR_M_BIT | SCTLR_C_BIT);

	/* Set the register width */
	if (psci_ns_entry_info[index].scr & SCR_RW_BIT)
		scr |= SCR_RW_BIT;
	else
		scr &= ~SCR_RW_BIT;

	scr |= SCR_NS_BIT;

	if (el_status)
		write_sctlr_el2(sctlr);
	else
		write_sctlr_el1(sctlr);

	/* Fulfill the cpu_on entry reqs. as per the psci spec */
	write_scr(scr);
	write_spsr(psci_ns_entry_info[index].eret_info.spsr);
	write_elr(psci_ns_entry_info[index].eret_info.entrypoint);

	return psci_ns_entry_info[index].context_id;
}

/*******************************************************************************
 * This function retrieves and stashes all the information needed to correctly
 * resume a cpu's execution in the non-secure state after it has been physically
 * powered on i.e. turned ON or resumed from SUSPEND. This is done prior to
 * turning it on or before suspending it.
 ******************************************************************************/
int psci_set_ns_entry_info(unsigned int index,
			   unsigned long entrypoint,
			   unsigned long context_id)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int rw, mode, ee, spsr = 0;
	unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1(), scr = read_scr();
	unsigned long el_status;

	/* Figure out which mode we enter the non-secure world in */
	el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
		ID_AA64PFR0_ELX_MASK;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	rw = scr & SCR_RW_BIT;
	if (rw) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if (entrypoint & 0x1)
			return PSCI_E_INVALID_PARAMS;

		if (el_status && (scr & SCR_HCE_BIT)) {
			mode = MODE_EL2;
			ee = read_sctlr_el2() & SCTLR_EE_BIT;
		} else {
			mode = MODE_EL1;
			ee = read_sctlr_el1() & SCTLR_EE_BIT;
		}

		spsr = DAIF_DBG_BIT | DAIF_ABT_BIT;
		spsr |= DAIF_IRQ_BIT | DAIF_FIQ_BIT;
		spsr <<= PSR_DAIF_SHIFT;
		spsr |= make_spsr(mode, MODE_SP_ELX, !rw);

		psci_ns_entry_info[index].sctlr |= ee;
		psci_ns_entry_info[index].scr |= SCR_RW_BIT;
	} else {

		if (el_status && (scr & SCR_HCE_BIT)) {
			mode = AARCH32_MODE_HYP;
			ee = read_sctlr_el2() & SCTLR_EE_BIT;
		} else {
			mode = AARCH32_MODE_SVC;
			ee = read_sctlr_el1() & SCTLR_EE_BIT;
		}

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		spsr |= DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
		spsr <<= PSR_DAIF_SHIFT;

		/*
		 * Check whether aarch32 has to be entered in Thumb mode. The
		 * T bit is set only after the DAIF bits have been shifted
		 * into place so that it is not displaced by the shift.
		 */
		if (entrypoint & 0x1)
			spsr |= SPSR32_T_BIT;

		if (ee)
			spsr |= SPSR32_EE_BIT;
		spsr |= mode;

		/* Ensure that the CPSR.E and SCTLR.EE bits match */
		psci_ns_entry_info[index].sctlr |= ee;
		psci_ns_entry_info[index].scr &= ~SCR_RW_BIT;
	}

	psci_ns_entry_info[index].eret_info.entrypoint = entrypoint;
	psci_ns_entry_info[index].eret_info.spsr = spsr;
	psci_ns_entry_info[index].context_id = context_id;

	return rc;
}
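/*
 * For illustration: a 64-bit entry into EL1 results in an spsr with all of
 * the DAIF exceptions masked and the 'EL1 with SP_EL1' mode encoding, while
 * a 32-bit entry to a Thumb entrypoint in SVC mode additionally carries
 * SPSR32_T_BIT (and SPSR32_EE_BIT if the current SCTLR.EE bit is set).
 */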

/*******************************************************************************
 * An affinity level could be on, on_pending, suspended or off. These are the
 * logical states it can be in. Physically it is either off or on. When it is
 * in the state on_pending then it's about to be turned on. It's not possible
 * to tell whether that has actually happened or not. So we err on the side of
 * caution & treat the affinity level as being turned off.
 ******************************************************************************/
inline unsigned int psci_get_phys_state(unsigned int aff_state)
{
	return (aff_state != PSCI_STATE_ON ? PSCI_STATE_OFF : PSCI_STATE_ON);
}

unsigned int psci_get_aff_phys_state(aff_map_node *aff_node)
{
	unsigned int aff_state;

	aff_state = psci_get_state(aff_node->state);
	return psci_get_phys_state(aff_state);
}

/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * recurses through all the affinity levels performing generic, architectural,
 * platform setup and state management e.g. for a cluster that's been powered
 * on, it will call the platform specific code which will enable coherency at
 * the interconnect level. For a cpu it could mean turning on the MMU etc.
 *
 * This function traverses from the lowest to the highest affinity level
 * implemented by the platform. Since it's recursive, for each call the
 * 'cur_afflvl' & 'tgt_afflvl' parameters keep track of which level we are at
 * and which level we need to get to respectively. Locks are picked up along
 * the way so that when the lowest affinity level is hit, state management can
 * be safely done. Prior to this, each affinity level does its bookkeeping as
 * per the state out of reset.
 *
 * CAUTION: This function is called with coherent stacks so that coherency and
 * the mmu can be turned on safely.
 ******************************************************************************/
unsigned int psci_afflvl_power_on_finish(unsigned long mpidr,
					 int cur_afflvl,
					 int tgt_afflvl,
					 afflvl_power_on_finisher *pon_handlers)
{
	unsigned int prev_state, next_state, rc = PSCI_E_SUCCESS;
	aff_map_node *aff_node;
	int level;

	mpidr &= MPIDR_AFFINITY_MASK;

	/*
	 * Some affinity instances at levels between the current and
	 * target levels could be absent in the mpidr. Skip them and
	 * start from the first present instance.
	 */
	level = psci_get_first_present_afflvl(mpidr,
					      cur_afflvl,
					      tgt_afflvl,
					      &aff_node);
	/*
	 * Return if there are no more affinity instances beyond this
	 * level to process. Else ensure that the returned affinity
	 * node makes sense.
	 */
	if (aff_node == NULL)
		return rc;

	assert(level == aff_node->level);

	/*
	 * This function acquires the lock corresponding to each
	 * affinity level so that by the time we hit the highest
	 * affinity level, a snapshot of the system topology has been
	 * taken and state management can be done safely.
	 */
	bakery_lock_get(mpidr, &aff_node->lock);

	/* Keep the old and new state handy */
	prev_state = psci_get_state(aff_node->state);
	next_state = PSCI_STATE_ON;

	/* Perform generic, architecture and platform specific handling */
	rc = pon_handlers[level](mpidr, aff_node, prev_state);
	if (rc != PSCI_E_SUCCESS) {
		psci_set_state(aff_node->state, prev_state);
		goto exit;
	}

	/*
	 * State management: Update the states if this is the highest
	 * affinity level requested else pass the job to the next level.
	 */
	if (aff_node->level != tgt_afflvl) {
		rc = psci_afflvl_power_on_finish(mpidr,
						 level + 1,
						 tgt_afflvl,
						 pon_handlers);
	} else {
		psci_change_state(mpidr, MPIDR_AFFLVL0, tgt_afflvl, next_state);
	}

	/* If all has gone as per plan then this cpu should be marked as ON */
	if (level == MPIDR_AFFLVL0) {
		next_state = psci_get_state(aff_node->state);
		assert(next_state == PSCI_STATE_ON);
	}

exit:
	bakery_lock_release(mpidr, &aff_node->lock);
	return rc;
}
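/*
 * Usage sketch (hypothetical handler table; the actual tables live with the
 * cpu_on and cpu_suspend finishers elsewhere in the psci code):
 *
 *   afflvl_power_on_finisher on_finishers[] = {
 *           cpu_on_finish,      // MPIDR_AFFLVL0: per-cpu setup e.g. the MMU
 *           cluster_on_finish,  // MPIDR_AFFLVL1: e.g. interconnect coherency
 *   };
 *   rc = psci_afflvl_power_on_finish(read_mpidr(),
 *                                    MPIDR_AFFLVL0,
 *                                    get_max_afflvl(),
 *                                    on_finishers);
 */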