/*
 * Copyright 2018-2020 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */
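
/*
 * Low-level PSCI handlers: core on/off plus standby and power-down
 * entry/exit at the core, cluster and system levels. Each group of
 * handlers is built only when the SoC-specific code sets the
 * corresponding SOC_* feature flag.
 */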

#include <asm_macros.S>
#include <assert_macros.S>

#include <lib/psci/psci.h>

#include <bl31_data.h>
#include <plat_psci.h>


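/*
 * PSCI_ABORT_CNT bounds the polling loop in _psci_cpu_on that waits
 * for a pending CPU_OFF to be aborted
 */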
#define RESET_RETRY_CNT 800
#define PSCI_ABORT_CNT 100

#if (SOC_CORE_RELEASE)

.global _psci_cpu_on

/*
 * int _psci_cpu_on(u_register_t core_mask)
 * x0 = target cpu core mask
 *
 * Called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 *
 */

func _psci_cpu_on
	stp x4, x5, [sp, #-16]!
	stp x6, x7, [sp, #-16]!
	stp x8, x9, [sp, #-16]!
	stp x10, x11, [sp, #-16]!
	stp x12, x13, [sp, #-16]!
	stp x14, x15, [sp, #-16]!
	stp x16, x17, [sp, #-16]!
	stp x18, x30, [sp, #-16]!

	mov x6, x0

	/* x0 = core mask (lsb)
	 * x6 = core mask (lsb)
	 */

	/* check if core disabled */
	bl _soc_ck_disabled /* 0-2 */
	cbnz w0, psci_disabled

	/* check the core data area to see whether the core can be
	 * turned on - read the core state
	 */
	mov x0, x6
	bl _getCoreState /* 0-5 */
	mov x9, x0

	/* x6 = core mask (lsb)
	 * x9 = core state (from data area)
	 */

	cmp x9, #CORE_DISABLED
	mov x0, #PSCI_E_DISABLED
	b.eq cpu_on_done

	cmp x9, #CORE_PENDING
	mov x0, #PSCI_E_ON_PENDING
	b.eq cpu_on_done

	cmp x9, #CORE_RELEASED
	mov x0, #PSCI_E_ALREADY_ON
	b.eq cpu_on_done

8:
	/* x6 = core mask (lsb)
	 * x9 = core state (from data area)
	 */

	cmp x9, #CORE_WFE
	b.eq core_in_wfe
	cmp x9, #CORE_IN_RESET
	b.eq core_in_reset
	cmp x9, #CORE_OFF
	b.eq core_is_off
	cmp x9, #CORE_OFF_PENDING

	/* if state == CORE_OFF_PENDING, set abort */
	mov x0, x6
	mov x1, #ABORT_FLAG_DATA
	mov x2, #CORE_ABORT_OP
	bl _setCoreData /* 0-3, [13-15] */

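	/* x3 = abort poll count */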
	ldr x3, =PSCI_ABORT_CNT
7:
	/* watch for abort to take effect */
	mov x0, x6
	bl _getCoreState /* 0-5 */
	cmp x0, #CORE_OFF
	b.eq core_is_off
	cmp x0, #CORE_PENDING
	mov x0, #PSCI_E_SUCCESS
	b.eq cpu_on_done

	/* loop until finished */
	sub x3, x3, #1
	cbnz x3, 7b

	/* if we didn't see either CORE_OFF or CORE_PENDING, then this
	 * core is in CORE_OFF_PENDING - exit with success, as the core will
	 * respond to the abort request
	 */
	mov x0, #PSCI_E_SUCCESS
	b cpu_on_done

/* this is where we start up a core out of reset */
core_in_reset:
	/* see if the soc-specific module supports this op */
	ldr x7, =SOC_CORE_RELEASE
	cbnz x7, 3f

	mov x0, #PSCI_E_NOT_SUPPORTED
	b cpu_on_done

	/* x6 = core mask (lsb) */
3:
	/* set core state in data area */
	mov x0, x6
	mov x1, #CORE_PENDING
	bl _setCoreState /* 0-3, [13-15] */

	/* release the core from reset */
	mov x0, x6
	bl _soc_core_release /* 0-3 */
	mov x0, #PSCI_E_SUCCESS
	b cpu_on_done

	/* Start up the core that has been powered-down via CPU_OFF
	 */
core_is_off:
	/* see if the soc-specific module supports this op
	 */
	ldr x7, =SOC_CORE_RESTART
	cbnz x7, 2f

	mov x0, #PSCI_E_NOT_SUPPORTED
	b cpu_on_done

	/* x6 = core mask (lsb) */
2:
	/* set core state in data area */
	mov x0, x6
	mov x1, #CORE_WAKEUP
	bl _setCoreState /* 0-3, [13-15] */

	/* put the core back into service */
	mov x0, x6
#if (SOC_CORE_RESTART)
	bl _soc_core_restart /* 0-5 */
#endif
	mov x0, #PSCI_E_SUCCESS
	b cpu_on_done

/* this is where we release a core that is being held in wfe */
core_in_wfe:
	/* x6 = core mask (lsb) */

	/* set core state in data area */
	mov x0, x6
	mov x1, #CORE_PENDING
	bl _setCoreState /* 0-3, [13-15] */
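	/* ensure the core state update has completed before waking the core */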
	dsb sy
	isb

	/* put the core back into service */
	sev
	sev
	isb
	mov x0, #PSCI_E_SUCCESS

cpu_on_done:
	/* restore the aarch32/64 non-volatile registers */
	ldp x18, x30, [sp], #16
	ldp x16, x17, [sp], #16
	ldp x14, x15, [sp], #16
	ldp x12, x13, [sp], #16
	ldp x10, x11, [sp], #16
	ldp x8, x9, [sp], #16
	ldp x6, x7, [sp], #16
	ldp x4, x5, [sp], #16
	b psci_completed
endfunc _psci_cpu_on

#endif


#if (SOC_CORE_OFF)

.global _psci_cpu_prep_off
.global _psci_cpu_off_wfi

/*
 * void _psci_cpu_prep_off(u_register_t core_mask)
 * this function performs the SoC-specific programming prior
 * to shutting the core down
 * x0 = core_mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_cpu_prep_off

	stp x4, x5, [sp, #-16]!
	stp x6, x7, [sp, #-16]!
	stp x8, x9, [sp, #-16]!
	stp x10, x11, [sp, #-16]!
	stp x12, x13, [sp, #-16]!
	stp x14, x15, [sp, #-16]!
	stp x16, x17, [sp, #-16]!
	stp x18, x30, [sp, #-16]!

	mov x10, x0 /* x10 = core_mask */

	/* the core does not return from cpu_off, so no need
	 * to save/restore non-volatile registers
	 */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr DAIFSet, #0xF

	/* read cpuectlr and save current value */
	mrs x4, CORTEX_A72_ECTLR_EL1
	mov x1, #CPUECTLR_DATA
	mov x2, x4
	mov x0, x10
	bl _setCoreData

	/* remove the core from coherency */
	bic x4, x4, #CPUECTLR_SMPEN_MASK
	msr CORTEX_A72_ECTLR_EL1, x4

	/* save scr_el3 */
	mov x0, x10
	mrs x4, SCR_EL3
	mov x2, x4
	mov x1, #SCR_EL3_DATA
	bl _setCoreData
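	/* the saved cpuectlr and scr_el3 values are restored by _psci_wakeup
	 * when the core is brought back up
	 */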

	/* x4 = scr_el3 */

	/* secure SGI (FIQ) taken to EL3, set SCR_EL3[FIQ] */
	orr x4, x4, #SCR_FIQ_MASK
	msr scr_el3, x4

	/* x10 = core_mask */

	/* prep the core for shutdown */
	mov x0, x10
	bl _soc_core_prep_off

	/* restore the aarch32/64 non-volatile registers */
	ldp x18, x30, [sp], #16
	ldp x16, x17, [sp], #16
	ldp x14, x15, [sp], #16
	ldp x12, x13, [sp], #16
	ldp x10, x11, [sp], #16
	ldp x8, x9, [sp], #16
	ldp x6, x7, [sp], #16
	ldp x4, x5, [sp], #16
	b psci_completed
endfunc _psci_cpu_prep_off

/*
 * void _psci_cpu_off_wfi(u_register_t core_mask, u_register_t resume_addr)
 * - this function shuts down the core
 * - this function does not return to its caller; if the core resumes
 *   execution, it does so at resume_addr
 */

func _psci_cpu_off_wfi
	/* save the wakeup address */
	mov x29, x1
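	/* x29 is callee-saved, so it survives the SoC call below */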

	/* x0 = core_mask */

	/* shutdown the core */
	bl _soc_core_entr_off

	/* branch to resume execution */
	br x29
endfunc _psci_cpu_off_wfi

#endif


#if (SOC_CORE_RESTART)

.global _psci_wakeup

/*
 * void _psci_wakeup(u_register_t core_mask)
 * this function performs the SoC-specific programming
 * after a core wakes up from OFF
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_wakeup

	stp x4, x5, [sp, #-16]!
	stp x6, x7, [sp, #-16]!
	stp x8, x9, [sp, #-16]!
	stp x10, x11, [sp, #-16]!
	stp x12, x13, [sp, #-16]!
	stp x14, x15, [sp, #-16]!
	stp x16, x17, [sp, #-16]!
	stp x18, x30, [sp, #-16]!

	mov x4, x0 /* x4 = core mask */

	/* restore scr_el3 */
	mov x0, x4
	mov x1, #SCR_EL3_DATA
	bl _getCoreData
	/* x0 = saved scr_el3 */
	msr SCR_EL3, x0

	/* x4 = core mask */

	/* restore CPUECTLR */
	mov x0, x4
	mov x1, #CPUECTLR_DATA
	bl _getCoreData
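	/* make sure the SMPEN bit is set so the core rejoins coherency */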
	orr x0, x0, #CPUECTLR_SMPEN_MASK
	msr CORTEX_A72_ECTLR_EL1, x0

	/* x4 = core mask */

	/* start the core back up */
	mov x0, x4
	bl _soc_core_exit_off

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp x18, x30, [sp], #16
	ldp x16, x17, [sp], #16
	ldp x14, x15, [sp], #16
	ldp x12, x13, [sp], #16
	ldp x10, x11, [sp], #16
	ldp x8, x9, [sp], #16
	ldp x6, x7, [sp], #16
	ldp x4, x5, [sp], #16
	b psci_completed
endfunc _psci_wakeup

#endif


#if (SOC_SYSTEM_RESET)

.global _psci_system_reset

func _psci_system_reset

	/* system reset is mandatory
	 * system reset is soc-specific
	 * Note: under no circumstances do we return from this call
	 */
	bl _soc_sys_reset
endfunc _psci_system_reset

#endif


#if (SOC_SYSTEM_OFF)

.global _psci_system_off

func _psci_system_off

	/* system off is mandatory
	 * system off is soc-specific
	 * Note: under no circumstances do we return from this call */
	b _soc_sys_off
endfunc _psci_system_off

#endif


#if (SOC_CORE_STANDBY)

.global _psci_core_entr_stdby
.global _psci_core_prep_stdby
.global _psci_core_exit_stdby

/*
 * void _psci_core_entr_stdby(u_register_t core_mask) - this
 * is the fast-path for simple core standby
 */

func _psci_core_entr_stdby
	stp x4, x5, [sp, #-16]!
	stp x6, x30, [sp, #-16]!

	mov x5, x0 /* x5 = core mask */

	/* save scr_el3 */
	mov x0, x5
	mrs x4, SCR_EL3
	mov x2, x4
	mov x1, #SCR_EL3_DATA
	bl _setCoreData

	/* x4 = SCR_EL3
	 * x5 = core mask
	 */

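	/* route IRQ and FIQ to EL3 so that an interrupt can wake the
	 * core from standby in _soc_core_entr_stdby
	 */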
	/* allow interrupts @ EL3 */
	orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr SCR_EL3, x4

	/* x5 = core mask */

	/* put the core into standby */
	mov x0, x5
	bl _soc_core_entr_stdby

	/* restore scr_el3 */
	mov x0, x5
	mov x1, #SCR_EL3_DATA
	bl _getCoreData
	/* x0 = saved scr_el3 */
	msr SCR_EL3, x0

	ldp x6, x30, [sp], #16
	ldp x4, x5, [sp], #16
	isb
	ret
endfunc _psci_core_entr_stdby

/*
 * void _psci_core_prep_stdby(u_register_t core_mask) - this
 * sets up the core to enter standby state through the normal path
 */

func _psci_core_prep_stdby
	stp x4, x5, [sp, #-16]!
	stp x6, x30, [sp, #-16]!

	mov x5, x0

	/* x5 = core mask */

	/* save scr_el3 */
	mov x0, x5
	mrs x4, SCR_EL3
	mov x2, x4
	mov x1, #SCR_EL3_DATA
	bl _setCoreData

	/* allow interrupts @ EL3 */
	orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov x0, x5
	bl _soc_core_prep_stdby

	ldp x6, x30, [sp], #16
	ldp x4, x5, [sp], #16
	isb
	ret
endfunc _psci_core_prep_stdby

/*
 * void _psci_core_exit_stdby(u_register_t core_mask) - this
 * exits the core from standby state through the normal path
 */

func _psci_core_exit_stdby
	stp x4, x5, [sp, #-16]!
	stp x6, x30, [sp, #-16]!

	mov x5, x0

	/* x5 = core mask */

	/* restore scr_el3 */
	mov x0, x5
	mov x1, #SCR_EL3_DATA
	bl _getCoreData
	/* x0 = saved scr_el3 */
	msr SCR_EL3, x0

	/* x5 = core mask */

	/* perform any SoC-specific programming after standby state */
	mov x0, x5
	bl _soc_core_exit_stdby

	ldp x6, x30, [sp], #16
	ldp x4, x5, [sp], #16
	isb
	ret
endfunc _psci_core_exit_stdby

#endif


#if (SOC_CORE_PWR_DWN)

.global _psci_core_prep_pwrdn
.global _psci_cpu_pwrdn_wfi
.global _psci_core_exit_pwrdn

/*
 * void _psci_core_prep_pwrdn(u_register_t core_mask)
 * this function prepares the core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_core_prep_pwrdn
	stp x4, x5, [sp, #-16]!
	stp x6, x7, [sp, #-16]!
	stp x8, x9, [sp, #-16]!
	stp x10, x11, [sp, #-16]!
	stp x12, x13, [sp, #-16]!
	stp x14, x15, [sp, #-16]!
	stp x16, x17, [sp, #-16]!
	stp x18, x30, [sp, #-16]!

	mov x6, x0

	/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr DAIFSet, #0xF

	/* save scr_el3 */
	mov x0, x6
	mrs x4, SCR_EL3
	mov x2, x4
	mov x1, #SCR_EL3_DATA
	bl _setCoreData

	/* allow interrupts @ EL3 */
	orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr SCR_EL3, x4

	/* save cpuectlr */
	mov x0, x6
	mov x1, #CPUECTLR_DATA
	mrs x2, CORTEX_A72_ECTLR_EL1
	bl _setCoreData
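	/* note: unlike the cluster/system power-down paths, SMPEN is not
	 * cleared here - the core is left in coherency at this point
	 */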

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov x0, x6
	bl _soc_core_prep_pwrdn

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp x18, x30, [sp], #16
	ldp x16, x17, [sp], #16
	ldp x14, x15, [sp], #16
	ldp x12, x13, [sp], #16
	ldp x10, x11, [sp], #16
	ldp x8, x9, [sp], #16
	ldp x6, x7, [sp], #16
	ldp x4, x5, [sp], #16
	b psci_completed
endfunc _psci_core_prep_pwrdn

/*
 * void _psci_cpu_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
 * this function powers down the core
 */

func _psci_cpu_pwrdn_wfi
	/* save the wakeup address */
	mov x29, x1

	/* x0 = core mask */

	/* shutdown the core */
	bl _soc_core_entr_pwrdn

	/* branch to resume execution */
	br x29
endfunc _psci_cpu_pwrdn_wfi

/*
 * void _psci_core_exit_pwrdn(u_register_t core_mask)
 * this function cleans up after a core power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_core_exit_pwrdn
	stp x4, x5, [sp, #-16]!
	stp x6, x7, [sp, #-16]!
	stp x8, x9, [sp, #-16]!
	stp x10, x11, [sp, #-16]!
	stp x12, x13, [sp, #-16]!
	stp x14, x15, [sp, #-16]!
	stp x16, x17, [sp, #-16]!
	stp x18, x30, [sp, #-16]!

	mov x5, x0 /* x5 = core mask */

	/* restore scr_el3 */
	mov x0, x5
	mov x1, #SCR_EL3_DATA
	bl _getCoreData
	/* x0 = saved scr_el3 */
	msr SCR_EL3, x0

	/* x5 = core mask */

	/* restore cpuectlr */
	mov x0, x5
	mov x1, #CPUECTLR_DATA
	bl _getCoreData
	/* make sure smp is set */
	orr x0, x0, #CPUECTLR_SMPEN_MASK
	msr CORTEX_A72_ECTLR_EL1, x0

	/* x5 = core mask */

	/* SoC-specific cleanup */
	mov x0, x5
	bl _soc_core_exit_pwrdn

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp x18, x30, [sp], #16
	ldp x16, x17, [sp], #16
	ldp x14, x15, [sp], #16
	ldp x12, x13, [sp], #16
	ldp x10, x11, [sp], #16
	ldp x8, x9, [sp], #16
	ldp x6, x7, [sp], #16
	ldp x4, x5, [sp], #16
	b psci_completed
endfunc _psci_core_exit_pwrdn

#endif

#if (SOC_CLUSTER_STANDBY)

.global _psci_clstr_prep_stdby
.global _psci_clstr_exit_stdby

/*
 * void _psci_clstr_prep_stdby(u_register_t core_mask) - this
 * sets up the cluster to enter standby state through the normal path
 */

func _psci_clstr_prep_stdby
	stp x4, x5, [sp, #-16]!
	stp x6, x30, [sp, #-16]!

	mov x5, x0

	/* x5 = core mask */

	/* save scr_el3 */
	mov x0, x5
	mrs x4, SCR_EL3
	mov x2, x4
	mov x1, #SCR_EL3_DATA
	bl _setCoreData

	/* allow interrupts @ EL3 */
	orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov x0, x5
	bl _soc_clstr_prep_stdby

	ldp x6, x30, [sp], #16
	ldp x4, x5, [sp], #16
	isb
	ret
endfunc _psci_clstr_prep_stdby

/*
 * void _psci_clstr_exit_stdby(u_register_t core_mask) - this
 * exits the cluster from standby state through the normal path
 */

func _psci_clstr_exit_stdby
	stp x4, x5, [sp, #-16]!
	stp x6, x30, [sp, #-16]!

	mov x5, x0 /* x5 = core mask */

	/* restore scr_el3 */
	mov x0, x5
	mov x1, #SCR_EL3_DATA
	bl _getCoreData
	/* x0 = saved scr_el3 */
	msr SCR_EL3, x0

	/* x5 = core mask */

	/* perform any SoC-specific programming after standby state */
	mov x0, x5
	bl _soc_clstr_exit_stdby

	ldp x6, x30, [sp], #16
	ldp x4, x5, [sp], #16
	isb
	ret
endfunc _psci_clstr_exit_stdby

#endif

#if (SOC_CLUSTER_PWR_DWN)

.global _psci_clstr_prep_pwrdn
.global _psci_clstr_exit_pwrdn

/*
 * void _psci_clstr_prep_pwrdn(u_register_t core_mask)
 * this function prepares the cluster+core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_clstr_prep_pwrdn
	stp x4, x5, [sp, #-16]!
	stp x6, x7, [sp, #-16]!
	stp x8, x9, [sp, #-16]!
	stp x10, x11, [sp, #-16]!
	stp x12, x13, [sp, #-16]!
	stp x14, x15, [sp, #-16]!
	stp x16, x17, [sp, #-16]!
	stp x18, x30, [sp, #-16]!

	mov x6, x0 /* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr DAIFSet, #0xF

	/* save scr_el3 */
	mov x0, x6
	mrs x4, SCR_EL3
	mov x2, x4
	mov x1, #SCR_EL3_DATA
	bl _setCoreData

	/* allow interrupts @ EL3 */
	orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr SCR_EL3, x4

	/* save cpuectlr */
	mov x0, x6
	mov x1, #CPUECTLR_DATA
	mrs x2, CORTEX_A72_ECTLR_EL1
	mov x4, x2
	bl _setCoreData

	/* remove core from coherency */
	bic x4, x4, #CPUECTLR_SMPEN_MASK
	msr CORTEX_A72_ECTLR_EL1, x4

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov x0, x6
	bl _soc_clstr_prep_pwrdn

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp x18, x30, [sp], #16
	ldp x16, x17, [sp], #16
	ldp x14, x15, [sp], #16
	ldp x12, x13, [sp], #16
	ldp x10, x11, [sp], #16
	ldp x8, x9, [sp], #16
	ldp x6, x7, [sp], #16
	ldp x4, x5, [sp], #16
	b psci_completed
endfunc _psci_clstr_prep_pwrdn

/*
 * void _psci_clstr_exit_pwrdn(u_register_t core_mask)
 * this function cleans up after a cluster power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_clstr_exit_pwrdn
	stp x4, x5, [sp, #-16]!
	stp x6, x7, [sp, #-16]!
	stp x8, x9, [sp, #-16]!
	stp x10, x11, [sp, #-16]!
	stp x12, x13, [sp, #-16]!
	stp x14, x15, [sp, #-16]!
	stp x16, x17, [sp, #-16]!
	stp x18, x30, [sp, #-16]!

	mov x4, x0 /* x4 = core mask */

	/* restore scr_el3 */
	mov x0, x4
	mov x1, #SCR_EL3_DATA
	bl _getCoreData
	/* x0 = saved scr_el3 */
	msr SCR_EL3, x0

	/* x4 = core mask */

	/* restore cpuectlr */
	mov x0, x4
	mov x1, #CPUECTLR_DATA
	bl _getCoreData
	/* make sure smp is set */
	orr x0, x0, #CPUECTLR_SMPEN_MASK
	msr CORTEX_A72_ECTLR_EL1, x0

	/* x4 = core mask */

	/* SoC-specific cleanup */
	mov x0, x4
	bl _soc_clstr_exit_pwrdn

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp x18, x30, [sp], #16
	ldp x16, x17, [sp], #16
	ldp x14, x15, [sp], #16
	ldp x12, x13, [sp], #16
	ldp x10, x11, [sp], #16
	ldp x8, x9, [sp], #16
	ldp x6, x7, [sp], #16
	ldp x4, x5, [sp], #16
	b psci_completed
endfunc _psci_clstr_exit_pwrdn

#endif

#if (SOC_SYSTEM_STANDBY)

.global _psci_sys_prep_stdby
.global _psci_sys_exit_stdby

/*
 * void _psci_sys_prep_stdby(u_register_t core_mask) - this
 * sets up the system to enter standby state through the normal path
 */

func _psci_sys_prep_stdby
	stp x4, x5, [sp, #-16]!
	stp x6, x30, [sp, #-16]!

	mov x5, x0 /* x5 = core mask */

	/* save scr_el3 */
	mov x0, x5
	mrs x4, SCR_EL3
	mov x2, x4
	mov x1, #SCR_EL3_DATA
	bl _setCoreData

	/* allow interrupts @ EL3 */
	orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov x0, x5
	bl _soc_sys_prep_stdby

	ldp x6, x30, [sp], #16
	ldp x4, x5, [sp], #16
	isb
	ret
endfunc _psci_sys_prep_stdby

/*
 * void _psci_sys_exit_stdby(u_register_t core_mask) - this
 * exits the system from standby state through the normal path
 */

func _psci_sys_exit_stdby
	stp x4, x5, [sp, #-16]!
	stp x6, x30, [sp, #-16]!

	mov x5, x0

	/* x5 = core mask */

	/* restore scr_el3 */
	mov x0, x5
	mov x1, #SCR_EL3_DATA
	bl _getCoreData
	/* x0 = saved scr_el3 */
	msr SCR_EL3, x0

	/* x5 = core mask */

	/* perform any SoC-specific programming after standby state */
	mov x0, x5
	bl _soc_sys_exit_stdby

	ldp x6, x30, [sp], #16
	ldp x4, x5, [sp], #16
	isb
	ret
endfunc _psci_sys_exit_stdby

#endif

#if (SOC_SYSTEM_PWR_DWN)

.global _psci_sys_prep_pwrdn
.global _psci_sys_pwrdn_wfi
.global _psci_sys_exit_pwrdn

/*
 * void _psci_sys_prep_pwrdn(u_register_t core_mask)
 * this function prepares the system+core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_sys_prep_pwrdn
	stp x4, x5, [sp, #-16]!
	stp x6, x7, [sp, #-16]!
	stp x8, x9, [sp, #-16]!
	stp x10, x11, [sp, #-16]!
	stp x12, x13, [sp, #-16]!
	stp x14, x15, [sp, #-16]!
	stp x16, x17, [sp, #-16]!
	stp x18, x30, [sp, #-16]!

	mov x6, x0 /* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr DAIFSet, #0xF

	/* save scr_el3 */
	mov x0, x6
	mrs x4, SCR_EL3
	mov x2, x4
	mov x1, #SCR_EL3_DATA
	bl _setCoreData

	/* allow interrupts @ EL3 */
	orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr SCR_EL3, x4

	/* save cpuectlr */
	mov x0, x6
	mov x1, #CPUECTLR_DATA
	mrs x2, CORTEX_A72_ECTLR_EL1
	mov x4, x2
	bl _setCoreData

	/* remove core from coherency */
	bic x4, x4, #CPUECTLR_SMPEN_MASK
	msr CORTEX_A72_ECTLR_EL1, x4

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov x0, x6
	bl _soc_sys_prep_pwrdn

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp x18, x30, [sp], #16
	ldp x16, x17, [sp], #16
	ldp x14, x15, [sp], #16
	ldp x12, x13, [sp], #16
	ldp x10, x11, [sp], #16
	ldp x8, x9, [sp], #16
	ldp x6, x7, [sp], #16
	ldp x4, x5, [sp], #16
	b psci_completed
endfunc _psci_sys_prep_pwrdn


/*
 * void _psci_sys_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
 * this function powers down the system
 */

func _psci_sys_pwrdn_wfi
	/* save the wakeup address */
	mov x29, x1

	/* x0 = core mask */

	/* shutdown the system */
	bl _soc_sys_pwrdn_wfi

	/* branch to resume execution */
	br x29
endfunc _psci_sys_pwrdn_wfi

/*
 * void _psci_sys_exit_pwrdn(u_register_t core_mask)
 * this function cleans up after a system power-down
 * x0 = core mask
 *
 * Called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_sys_exit_pwrdn

	stp x4, x5, [sp, #-16]!
	stp x6, x7, [sp, #-16]!
	stp x8, x9, [sp, #-16]!
	stp x10, x11, [sp, #-16]!
	stp x12, x13, [sp, #-16]!
	stp x14, x15, [sp, #-16]!
	stp x16, x17, [sp, #-16]!
	stp x18, x30, [sp, #-16]!

	mov x4, x0 /* x4 = core mask */

	/* restore scr_el3 */
	mov x0, x4
	mov x1, #SCR_EL3_DATA
	bl _getCoreData

	/* x0 = saved scr_el3 */
	msr SCR_EL3, x0

	/* x4 = core mask */

	/* restore cpuectlr */
	mov x0, x4
	mov x1, #CPUECTLR_DATA
	bl _getCoreData

	/* make sure smp is set */
	orr x0, x0, #CPUECTLR_SMPEN_MASK
	msr CORTEX_A72_ECTLR_EL1, x0

	/* x4 = core mask */

	/* SoC-specific cleanup */
	mov x0, x4
	bl _soc_sys_exit_pwrdn

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp x18, x30, [sp], #16
	ldp x16, x17, [sp], #16
	ldp x14, x15, [sp], #16
	ldp x12, x13, [sp], #16
	ldp x10, x11, [sp], #16
	ldp x8, x9, [sp], #16
	ldp x6, x7, [sp], #16
	ldp x4, x5, [sp], #16
	b psci_completed
endfunc _psci_sys_exit_pwrdn

#endif


/* psci std returns */
func psci_disabled
	ldr w0, =PSCI_E_DISABLED
	b psci_completed
endfunc psci_disabled


func psci_not_present
	ldr w0, =PSCI_E_NOT_PRESENT
	b psci_completed
endfunc psci_not_present


func psci_on_pending
	ldr w0, =PSCI_E_ON_PENDING
	b psci_completed
endfunc psci_on_pending


func psci_already_on
	ldr w0, =PSCI_E_ALREADY_ON
	b psci_completed
endfunc psci_already_on


func psci_failure
	ldr w0, =PSCI_E_INTERN_FAIL
	b psci_completed
endfunc psci_failure


func psci_unimplemented
	ldr w0, =PSCI_E_NOT_SUPPORTED
	b psci_completed
endfunc psci_unimplemented


func psci_denied
	ldr w0, =PSCI_E_DENIED
	b psci_completed
endfunc psci_denied


func psci_invalid
	ldr w0, =PSCI_E_INVALID_PARAMS
	b psci_completed
endfunc psci_invalid


func psci_success
	mov x0, #PSCI_E_SUCCESS
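	/* fall through to psci_completed */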
endfunc psci_success


func psci_completed
	/* x0 = status code */
	ret
endfunc psci_completed