/*
 * Copyright 2018-2021 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

	.section .text, "ax"

#include <asm_macros.S>

#include <lib/psci/psci.h>
#include <nxp_timer.h>
#include <plat_gic.h>
#include <pmu.h>

#include <bl31_data.h>
#include <plat_psci.h>
#include <platform_def.h>

	.global soc_init_lowlevel
	.global soc_init_percpu
	.global _set_platform_security
	.global _soc_set_start_addr

	.global _soc_core_release
	.global _soc_ck_disabled
	.global _soc_core_restart
	.global _soc_core_prep_off
	.global _soc_core_entr_off
	.global _soc_core_exit_off
	.global _soc_sys_reset
	.global _soc_sys_off
	.global _soc_core_prep_stdby
	.global _soc_core_entr_stdby
	.global _soc_core_exit_stdby
	.global _soc_core_prep_pwrdn
	.global _soc_core_entr_pwrdn
	.global _soc_core_exit_pwrdn
	.global _soc_clstr_prep_stdby
	.global _soc_clstr_exit_stdby
	.global _soc_clstr_prep_pwrdn
	.global _soc_clstr_exit_pwrdn
	.global _soc_sys_prep_stdby
	.global _soc_sys_exit_stdby
	.global _soc_sys_prep_pwrdn
	.global _soc_sys_pwrdn_wfi
	.global _soc_sys_exit_pwrdn

	.equ TZPCDECPROT_0_SET_BASE, 0x02200804
	.equ TZPCDECPROT_1_SET_BASE, 0x02200810
	.equ TZPCDECPROT_2_SET_BASE, 0x0220081C

	.equ TZASC_REGION_ATTRIBUTES_0_0, 0x01100110

/*
 * This function initializes the SoC.
 * in: void
 * out: void
 * uses x0 - x11
 */
func soc_init_lowlevel
	/*
	 * Called from C, so save the non-volatile regs
	 * save these as pairs of registers to maintain the
	 * required 16-byte alignment on the stack
	 */
	stp x4, x5, [sp, #-16]!
	stp x6, x7, [sp, #-16]!
	stp x8, x9, [sp, #-16]!
	stp x10, x11, [sp, #-16]!
	stp x12, x13, [sp, #-16]!
	stp x18, x30, [sp, #-16]!

	/*
	 * Make sure the personality has been established by releasing cores
	 * that are marked "to-be-disabled" from reset
	 */
	bl release_disabled /* 0-8 */

	/* Set SCRATCHRW7 to 0x0 */
	ldr x0, =DCFG_SCRATCHRW7_OFFSET
	mov x1, xzr
	bl _write_reg_dcfg

	/* Restore the aarch32/64 non-volatile registers */
	ldp x18, x30, [sp], #16
	ldp x12, x13, [sp], #16
	ldp x10, x11, [sp], #16
	ldp x8, x9, [sp], #16
	ldp x6, x7, [sp], #16
	ldp x4, x5, [sp], #16
	ret
endfunc soc_init_lowlevel

/*
 * void soc_init_percpu(void)
 *
 * This function performs any soc-specific initialization that is needed on
 * a per-core basis
 * in: none
 * out: none
 * uses x0 - x3
 */
func soc_init_percpu
	stp x4, x30, [sp, #-16]!

	bl plat_my_core_mask
	mov x2, x0

	/* x2 = core mask */

	/* see if this core is marked for prefetch disable */
	mov x0, #PREFETCH_DIS_OFFSET
	bl _get_global_data /* 0-1 */
	tst x0, x2
	b.eq 1f
	bl _disable_ldstr_pfetch_A72 /* 0 */
1:
	mov x0, #NXP_PMU_ADDR
	bl enable_timer_base_to_cluster

	ldp x4, x30, [sp], #16
	ret
endfunc soc_init_percpu

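/*
 * Note (illustrative, not from the original sources): PREFETCH_DIS_OFFSET
 * is assumed to name a word in the bl31 data area holding a bitmask of
 * cores whose A72 load-store prefetcher should be disabled, one bit per
 * core (bit 0 = core 0). In C terms the test above is roughly:
 *   if (prefetch_dis_mask & (1 << core)) disable_ldstr_pfetch_a72();
 */
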
/*
 * This function determines if a core is disabled via COREDISABLEDSR
 * in: w0 = core_mask_lsb
 * out: w0 = 0, core not disabled
 *      w0 != 0, core disabled
 * uses x0, x1
 */
func _soc_ck_disabled
	/* get base addr of dcfg block */
	ldr x1, =NXP_DCFG_ADDR

	/* read COREDISABLEDSR */
	ldr w1, [x1, #DCFG_COREDISABLEDSR_OFFSET]

	/* test core bit */
	and w0, w1, w0

	ret
endfunc _soc_ck_disabled

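/*
 * Illustrative sketch (an assumption, not stated in the original sources):
 * the "core mask lsb" convention used throughout this file is one bit per
 * core, with bit n set for core n (core 0 -> 0x1, core 1 -> 0x2, ...).
 * A C-level caller might therefore test for a fused-off core with:
 *   if (_soc_ck_disabled(1 << core_num) != 0) { core is disabled }
 */
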
/*
 * This function sets the security mechanisms in the SoC to implement the
 * Platform Security Policy
 */
func _set_platform_security
	mov x3, x30

#if (!SUPPRESS_TZC)
	/* initialize the tzpc */
	bl init_tzpc
#endif

#if (!SUPPRESS_SEC)
	/* initialize secmon */
	bl initSecMon
#endif

	mov x30, x3
	ret
endfunc _set_platform_security

/*
 * Part of CPU_ON
 *
 * This function releases a secondary core from reset
 * in: x0 = core_mask_lsb
 * out: none
 * uses: x0 - x3
 */
_soc_core_release:
	mov x3, x30

	/*
	 * Write to CORE_HOLD to tell the bootrom that we want this core
	 * to run
	 */
	ldr x1, =NXP_SEC_REGFILE_ADDR
	str w0, [x1, #CORE_HOLD_OFFSET]

	/* Read-modify-write BRRL to release core */
	mov x1, #NXP_RESET_ADDR
	ldr w2, [x1, #BRR_OFFSET]
	orr w2, w2, w0
	str w2, [x1, #BRR_OFFSET]
	dsb sy
	isb

	/* Send event */
	sev
	isb

	mov x30, x3
	ret

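/*
 * Note (assumption, not stated in the original sources): this release
 * sequence appears to rely on the secondary cores parking in wfe after
 * reset; CORE_HOLD and the boot-release (BRR) bit gate the core, and the
 * sev is what pops the parked core out of wfe so it can observe its
 * release and branch to the address programmed via BOOTLOCPTRL/H.
 */
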
/*
 * This function writes a 64-bit address to bootlocptrh/l
 * in: x0, 64-bit address to write to BOOTLOCPTRL/H
 * uses x0, x1, x2
 */
func _soc_set_start_addr
	/* Get the 64-bit base address of the dcfg block */
	ldr x2, =NXP_DCFG_ADDR

	/* Write the 32-bit BOOTLOCPTRL register */
	mov x1, x0
	str w1, [x2, #DCFG_BOOTLOCPTRL_OFFSET]

	/* Write the 32-bit BOOTLOCPTRH register */
	lsr x1, x0, #32
	str w1, [x2, #DCFG_BOOTLOCPTRH_OFFSET]
	ret
endfunc _soc_set_start_addr

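/*
 * Equivalent C sketch (illustrative only, using mmio_write_32() as seen
 * in other TF-A platform code):
 *   mmio_write_32(NXP_DCFG_ADDR + DCFG_BOOTLOCPTRL_OFFSET,
 *                 (uint32_t)addr);
 *   mmio_write_32(NXP_DCFG_ADDR + DCFG_BOOTLOCPTRH_OFFSET,
 *                 (uint32_t)(addr >> 32));
 */
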
/*
 * Part of CPU_ON
 *
 * This function restarts a core that was shut down via _soc_core_entr_off
 * in: x0 = core mask lsb (of the target cpu)
 * out: x0 == 0, on success
 *      x0 != 0, on failure
 * uses x0 - x6
 */
_soc_core_restart:
	mov x6, x30
	mov x4, x0

	/* pgm GICD_CTLR - enable secure grp0 */
	mov x5, #NXP_GICD_ADDR
	ldr w2, [x5, #GICD_CTLR_OFFSET]
	orr w2, w2, #GICD_CTLR_EN_GRP_0
	str w2, [x5, #GICD_CTLR_OFFSET]
	dsb sy
	isb

	/* Poll on RWP until the write completes */
4:
	ldr w2, [x5, #GICD_CTLR_OFFSET]
	tst w2, #GICD_CTLR_RWP
	b.ne 4b

	/*
	 * x4 = core mask lsb
	 * x5 = gicd base addr
	 */

	mov x0, x4
	bl get_mpidr_value

	/* Generate target list bit */
	and x1, x0, #MPIDR_AFFINITY0_MASK
	mov x2, #1
	lsl x2, x2, x1

	/* Get the affinity1 field */
	and x1, x0, #MPIDR_AFFINITY1_MASK
	lsl x1, x1, #8
	orr x2, x2, x1

	/* Insert the INTID for SGI15 */
	orr x2, x2, #ICC_SGI0R_EL1_INTID

	/* Fire the SGI */
	msr ICC_SGI0R_EL1, x2
	dsb sy
	isb

	/* Load '0' on success */
	mov x0, xzr

	mov x30, x6
	ret

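/*
 * Note (architectural background, for reference): ICC_SGI0R_EL1 encodes
 * the target list in bits [15:0] (one bit per core within the target
 * cluster), Aff1 in bits [23:16], and the SGI INTID in bits [27:24],
 * which is why the code above shifts the masked affinity1 field up by 8
 * and then ORs in the SGI15 INTID before firing the SGI.
 */
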
/*
 * Part of CPU_OFF
 *
 * This function programs SoC & GIC registers in preparation for shutting down
 * the core
 * in: x0 = core mask lsb
 * out: none
 * uses x0 - x7
 */
_soc_core_prep_off:
	mov x8, x30
	mov x7, x0

	/* x7 = core mask lsb */

	mrs x1, CPUECTLR_EL1

	/* Set smp and disable L2 snoops in cpuectlr */
	orr x1, x1, #CPUECTLR_SMPEN_EN
	orr x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
	bic x1, x1, #CPUECTLR_INS_PREFETCH_MASK
	bic x1, x1, #CPUECTLR_DAT_PREFETCH_MASK

	/* Set retention control in cpuectlr */
	bic x1, x1, #CPUECTLR_TIMER_MASK
	orr x1, x1, #CPUECTLR_TIMER_2TICKS
	msr CPUECTLR_EL1, x1

	/* Get redistributor rd base addr for this core */
	mov x0, x7
	bl get_gic_rd_base
	mov x6, x0

	/* Get redistributor sgi base addr for this core */
	mov x0, x7
	bl get_gic_sgi_base
	mov x5, x0

	/*
	 * x5 = gicr sgi base addr
	 * x6 = gicr rd base addr
	 * x7 = core mask lsb
	 */

	/* Disable SGI 15 at redistributor - GICR_ICENABLER0 */
	mov w3, #GICR_ICENABLER0_SGI15
	str w3, [x5, #GICR_ICENABLER0_OFFSET]
2:
	/* Poll on rwp bit in GICR_CTLR */
	ldr w4, [x6, #GICR_CTLR_OFFSET]
	tst w4, #GICR_CTLR_RWP
	b.ne 2b

	/* Disable GRP1 interrupts at cpu interface */
	msr ICC_IGRPEN1_EL3, xzr

	/* Disable GRP0 ints at cpu interface */
	msr ICC_IGRPEN0_EL1, xzr

	/* Program the redistributor - poll on GICR_CTLR.RWP as needed */

	/* Define SGI 15 as Grp0 - GICR_IGROUPR0 */
	ldr w4, [x5, #GICR_IGROUPR0_OFFSET]
	bic w4, w4, #GICR_IGROUPR0_SGI15
	str w4, [x5, #GICR_IGROUPR0_OFFSET]

	/* Define SGI 15 as Grp0 - GICR_IGRPMODR0 */
	ldr w3, [x5, #GICR_IGRPMODR0_OFFSET]
	bic w3, w3, #GICR_IGRPMODR0_SGI15
	str w3, [x5, #GICR_IGRPMODR0_OFFSET]

	/* Set priority of SGI 15 to highest (0x0) - GICR_IPRIORITYR3 */
	ldr w4, [x5, #GICR_IPRIORITYR3_OFFSET]
	bic w4, w4, #GICR_IPRIORITYR3_SGI15_MASK
	str w4, [x5, #GICR_IPRIORITYR3_OFFSET]

	/* Enable SGI 15 at redistributor - GICR_ISENABLER0 */
	mov w3, #GICR_ISENABLER0_SGI15
	str w3, [x5, #GICR_ISENABLER0_OFFSET]
	dsb sy
	isb
3:
	/* Poll on rwp bit in GICR_CTLR */
	ldr w4, [x6, #GICR_CTLR_OFFSET]
	tst w4, #GICR_CTLR_RWP
	b.ne 3b

	/* Quiesce the debug interfaces */
	mrs x3, osdlr_el1
	orr x3, x3, #OSDLR_EL1_DLK_LOCK
	msr osdlr_el1, x3
	isb

	/* Enable grp0 ints */
	mov x3, #ICC_IGRPEN0_EL1_EN
	msr ICC_IGRPEN0_EL1, x3

	/*
	 * x5 = gicr sgi base addr
	 * x6 = gicr rd base addr
	 * x7 = core mask lsb
	 */

	/* Clear any pending interrupts */
	mvn w1, wzr
	str w1, [x5, #GICR_ICPENDR0_OFFSET]

	/* Make sure system counter is enabled */
	ldr x3, =NXP_TIMER_ADDR
	ldr w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
	tst w0, #SYS_COUNTER_CNTCR_EN
	b.ne 4f
	orr w0, w0, #SYS_COUNTER_CNTCR_EN
	str w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
4:
	/* Enable the core timer and mask timer interrupt */
	mov x1, #CNTP_CTL_EL0_EN
	orr x1, x1, #CNTP_CTL_EL0_IMASK
	msr cntp_ctl_el0, x1

	isb
	mov x30, x8
	ret

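/*
 * Note (rationale, inferred from the code above): SGI 15 is reconfigured
 * as a secure Group 0 interrupt with the highest priority so that the
 * powered-down core, sitting in wfi with only Group 0 forwarding enabled,
 * can be woken by the SGI that _soc_core_restart fires; the OS double
 * lock (osdlr_el1) is taken so an external debugger cannot disturb the
 * core while it is in the off state.
 */
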
/*
 * Part of CPU_OFF
 *
 * This function performs the final steps to shut down the core
 * in: x0 = core mask lsb
 * out: none
 * uses x0 - x5
 */
_soc_core_entr_off:
	mov x5, x30
	mov x4, x0

	/* x4 = core mask */
1:
	/* Enter low-power state by executing wfi */
	wfi

	/* See if SGI15 woke us up */
	mrs x2, ICC_IAR0_EL1
	mov x3, #ICC_IAR0_EL1_SGI15
	cmp x2, x3
	b.ne 1b

	/* Deactivate the int */
	msr ICC_EOIR0_EL1, x2

	/* x4 = core mask */
2:
	/* Check if core has been turned on */
	mov x0, x4
	bl _getCoreState

	/* x0 = core state */

	cmp x0, #CORE_WAKEUP
	b.ne 1b

	/* If we get here, then we have exited the wfi */
	mov x30, x5
	ret

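/*
 * Note (assumption about the surrounding PSCI glue): _getCoreState is
 * expected to read the per-core state word that the CPU_ON path sets to
 * CORE_WAKEUP, so spurious wakeups (any interrupt other than SGI15, or
 * an SGI15 not accompanied by a state change) simply loop back into wfi.
 */
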
/*
 * Part of CPU_OFF
 *
 * This function starts the process of bringing a core back up
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6
 */
_soc_core_exit_off:
	mov x6, x30
	mov x5, x0

	/* Disable forwarding of GRP0 ints at cpu interface */
	msr ICC_IGRPEN0_EL1, xzr

	/* Get redistributor sgi base addr for this core */
	mov x0, x5
	bl get_gic_sgi_base
	mov x4, x0

	/* x4 = gicr sgi base addr */
	/* x5 = core mask */

	/* Disable SGI 15 at redistributor - GICR_ICENABLER0 */
	mov w1, #GICR_ICENABLER0_SGI15
	str w1, [x4, #GICR_ICENABLER0_OFFSET]

	/* Get redistributor rd base addr for this core */
	mov x0, x5
	bl get_gic_rd_base
	mov x4, x0

	/* x4 = gicr rd base addr */
2:
	/* Poll on rwp bit in GICR_CTLR */
	ldr w2, [x4, #GICR_CTLR_OFFSET]
	tst w2, #GICR_CTLR_RWP
	b.ne 2b

	/* x4 = gicr rd base addr */

	/* Unlock the debug interfaces */
	mrs x3, osdlr_el1
	bic x3, x3, #OSDLR_EL1_DLK_LOCK
	msr osdlr_el1, x3
	isb

	dsb sy
	isb
	mov x30, x6
	ret

/*
 * This function requests a reset of the entire SoC
 * in: none
 * out: none
 * uses: x0, x1, x2, x3, x4, x5, x6
 */
_soc_sys_reset:
	mov x3, x30

	/* Make sure the mask is cleared in the reset request mask register */
	mov x0, #RST_RSTRQMR1_OFFSET
	mov w1, wzr
	bl _write_reg_reset

	/* Set the reset request */
	mov x4, #RST_RSTCR_OFFSET
	mov x0, x4
	mov w1, #RSTCR_RESET_REQ
	bl _write_reg_reset

	/* x4 = RST_RSTCR_OFFSET */

	/*
	 * Just in case this address range is mapped as cacheable,
	 * flush the write out of the dcaches
	 */
	mov x2, #NXP_RESET_ADDR
	add x2, x2, x4
	dc cvac, x2
	dsb st
	isb

	/* This function does not return */
1:
	wfi
	b 1b

/*
 * Part of SYSTEM_OFF
 *
 * This function turns off the SoC clocks
 * Note: this function is not intended to return, and the only allowable
 * recovery is POR
 * in: none
 * out: none
 * uses x0, x1, x2, x3
 */
_soc_sys_off:
	/*
	 * Disable sec, spi and flexspi
	 * TBD - Check if eNETC needs to be disabled
	 */
	ldr x2, =NXP_DCFG_ADDR
	ldr x0, =DCFG_DEVDISR1_OFFSET
	ldr w1, =DCFG_DEVDISR1_SEC
	str w1, [x2, x0]
	ldr x0, =DCFG_DEVDISR4_OFFSET
	ldr w1, =DCFG_DEVDISR4_SPI_QSPI
	str w1, [x2, x0]

	/* Set TPMWAKEMR0 */
	ldr x0, =TPMWAKEMR0_ADDR
	mov w1, #0x1
	str w1, [x0]

	/* Disable icache, dcache, mmu @ EL1 */
	mov x1, #SCTLR_I_C_M_MASK
	mrs x0, sctlr_el1
	bic x0, x0, x1
	msr sctlr_el1, x0

	/* Disable L2 prefetches */
	mrs x0, CPUECTLR_EL1
	orr x0, x0, #CPUECTLR_SMPEN_EN
	bic x0, x0, #CPUECTLR_TIMER_MASK
	orr x0, x0, #CPUECTLR_TIMER_2TICKS
	msr CPUECTLR_EL1, x0
	dsb sy
	isb

	/* Disable CCI snoop domain */
	ldr x0, =NXP_CCI_ADDR
	mov w1, #0x1
	str w1, [x0]

	bl get_pmu_idle_core_mask

	/* x3 = pmu base addr */
	mov x3, #NXP_PMU_ADDR
4:
	ldr w1, [x3, #PMU_PCPW20SR_OFFSET]
	cmp w1, w0
	b.ne 4b

	bl get_pmu_idle_cluster_mask
	mov x3, #NXP_PMU_ADDR
	str w0, [x3, #PMU_CLAINACTSETR_OFFSET]

	bl get_pmu_idle_core_mask
	mov x3, #NXP_PMU_ADDR
1:
	ldr w1, [x3, #PMU_PCPW20SR_OFFSET]
	cmp w1, w0
	b.ne 1b

	bl get_pmu_flush_cluster_mask
	mov x3, #NXP_PMU_ADDR
	str w0, [x3, #PMU_CLL2FLUSHSETR_OFFSET]
2:
	ldr w1, [x3, #PMU_CLL2FLUSHSR_OFFSET]
	cmp w1, w0
	b.ne 2b

	str w0, [x3, #PMU_CLSL2FLUSHCLRR_OFFSET]

	str w0, [x3, #PMU_CLSINACTSETR_OFFSET]

	mov x2, #DAIF_SET_MASK
	mrs x1, spsr_el1
	orr x1, x1, x2
	msr spsr_el1, x1

	mrs x1, spsr_el2
	orr x1, x1, x2
	msr spsr_el2, x1

	/* Force the debug interface to be quiescent */
	mrs x0, osdlr_el1
	orr x0, x0, #0x1
	msr osdlr_el1, x0

	/* Invalidate all TLB entries at all 3 exception levels */
	tlbi alle1
	tlbi alle2
	tlbi alle3

	/* x3 = pmu base addr */

	/* Request lpm20 */
	ldr x0, =PMU_POWMGTCSR_OFFSET
	ldr w1, =PMU_POWMGTCSR_VAL
	str w1, [x3, x0]
	isb
	dsb sy
5:
	wfe
	b.eq 5b

/*
 * Part of CPU_SUSPEND
 *
 * This function performs SoC-specific programming prior to standby
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_core_prep_stdby:
	/* Clear CPUECTLR_EL1[2:0] */
	mrs x1, CPUECTLR_EL1
	bic x1, x1, #CPUECTLR_TIMER_MASK
	msr CPUECTLR_EL1, x1

	ret

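/*
 * Note (A72-specific background, for reference): CPUECTLR_EL1[2:0] is the
 * CPU retention control field; 0b000 disables retention, while the
 * CPUECTLR_TIMER_2TICKS encoding used in the power-down paths below
 * requests entry to the retention state after 2 generic-timer ticks.
 * Standby clears the field so a plain wfi does not enter retention.
 */
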
/*
 * Part of CPU_SUSPEND
 *
 * This function puts the calling core into standby state
 * in: x0 = core mask lsb
 * out: none
 * uses x0
 */
_soc_core_entr_stdby:
	/* X0 = core mask lsb */
	dsb sy
	isb
	wfi

	ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs any SoC-specific cleanup after standby state
 * in: x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_core_exit_stdby:
	ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs SoC-specific programming prior to power-down
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
_soc_core_prep_pwrdn:
	/* Make sure system counter is enabled */
	ldr x2, =NXP_TIMER_ADDR
	ldr w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	tst w0, #SYS_COUNTER_CNTCR_EN
	b.ne 1f
	orr w0, w0, #SYS_COUNTER_CNTCR_EN
	str w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:
	/*
	 * Enable dynamic retention control (CPUECTLR[2:0])
	 * Set the SMPEN bit (CPUECTLR[6])
	 */
	mrs x1, CPUECTLR_EL1
	bic x1, x1, #CPUECTLR_RET_MASK
	orr x1, x1, #CPUECTLR_TIMER_2TICKS
	orr x1, x1, #CPUECTLR_SMPEN_EN
	msr CPUECTLR_EL1, x1

	isb
	ret

/*
 * Part of CPU_SUSPEND
 *
 * This function puts the calling core into a power-down state
 * in: x0 = core mask lsb
 * out: none
 * uses x0
 */
_soc_core_entr_pwrdn:
	/* X0 = core mask lsb */
	dsb sy
	isb
	wfi

	ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs any SoC-specific cleanup after power-down state
 * in: x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_core_exit_pwrdn:
	ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs SoC-specific programming prior to standby
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_clstr_prep_stdby:
	/* Clear CPUECTLR_EL1[2:0] */
	mrs x1, CPUECTLR_EL1
	bic x1, x1, #CPUECTLR_TIMER_MASK
	msr CPUECTLR_EL1, x1

	ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs any SoC-specific cleanup after standby state
 * in: x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_clstr_exit_stdby:
	ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs SoC-specific programming prior to power-down
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
_soc_clstr_prep_pwrdn:
	/* Make sure system counter is enabled */
	ldr x2, =NXP_TIMER_ADDR
	ldr w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	tst w0, #SYS_COUNTER_CNTCR_EN
	b.ne 1f
	orr w0, w0, #SYS_COUNTER_CNTCR_EN
	str w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:
	/*
	 * Enable dynamic retention control (CPUECTLR[2:0])
	 * Set the SMPEN bit (CPUECTLR[6])
	 */
	mrs x1, CPUECTLR_EL1
	bic x1, x1, #CPUECTLR_RET_MASK
	orr x1, x1, #CPUECTLR_TIMER_2TICKS
	orr x1, x1, #CPUECTLR_SMPEN_EN
	msr CPUECTLR_EL1, x1

	isb
	ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs any SoC-specific cleanup after power-down state
 * in: x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_clstr_exit_pwrdn:
	ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs SoC-specific programming prior to standby
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_sys_prep_stdby:
	/* Clear CPUECTLR_EL1[2:0] */
	mrs x1, CPUECTLR_EL1
	bic x1, x1, #CPUECTLR_TIMER_MASK
	msr CPUECTLR_EL1, x1

	ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs any SoC-specific cleanup after standby state
 * in: x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_sys_exit_stdby:
	ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs SoC-specific programming prior to
 * suspend-to-power-down
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4
 */
_soc_sys_prep_pwrdn:
	/* Set retention control */
	mrs x0, CPUECTLR_EL1
	bic x0, x0, #CPUECTLR_TIMER_MASK
	orr x0, x0, #CPUECTLR_TIMER_2TICKS
	orr x0, x0, #CPUECTLR_SMPEN_EN
	msr CPUECTLR_EL1, x0
	dsb sy
	isb
	ret

/*
 * Part of CPU_SUSPEND
 *
 * This function puts the calling core, and potentially the SoC, into a
 * low-power state
 * in: x0 = core mask lsb
 * out: x0 = 0, success
 *      x0 < 0, failure
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x13, x14, x15,
 *      x16, x17, x18
 */
_soc_sys_pwrdn_wfi:
	mov x18, x30

	mov x3, #NXP_PMU_ADDR

	/* x3 = pmu base addr */

	/* Backup epu registers to stack */
	ldr x2, =NXP_EPU_ADDR
	ldr w4, [x2, #EPU_EPIMCR10_OFFSET]
	ldr w5, [x2, #EPU_EPCCR10_OFFSET]
	ldr w6, [x2, #EPU_EPCTR10_OFFSET]
	ldr w7, [x2, #EPU_EPGCR_OFFSET]
	stp x4, x5, [sp, #-16]!
	stp x6, x7, [sp, #-16]!

	/*
	 * x2 = epu base addr
	 * x3 = pmu base addr
	 */

	/* Set up EPU event to receive the wake signal from PMU */
	mov w4, #EPU_EPIMCR10_VAL
	mov w5, #EPU_EPCCR10_VAL
	mov w6, #EPU_EPCTR10_VAL
	mov w7, #EPU_EPGCR_VAL
	str w4, [x2, #EPU_EPIMCR10_OFFSET]
	str w5, [x2, #EPU_EPCCR10_OFFSET]
	str w6, [x2, #EPU_EPCTR10_OFFSET]
	str w7, [x2, #EPU_EPGCR_OFFSET]

	ldr x2, =NXP_GICD_ADDR

	/*
	 * x2 = gicd base addr
	 * x3 = pmu base addr
	 */

	/* Backup flextimer/mmc/usb interrupt router */
	ldr x0, =GICD_IROUTER60_OFFSET
	ldr x1, =GICD_IROUTER76_OFFSET
	ldr w4, [x2, x0]
	ldr w5, [x2, x1]
	ldr x0, =GICD_IROUTER112_OFFSET
	ldr x1, =GICD_IROUTER113_OFFSET
	ldr w6, [x2, x0]
	ldr w7, [x2, x1]
	stp x4, x5, [sp, #-16]!
	stp x6, x7, [sp, #-16]!

	/*
	 * x2 = gicd base addr
	 * x3 = pmu base addr
	 * x0 = GICD_IROUTER112_OFFSET
	 * x1 = GICD_IROUTER113_OFFSET
	 */

	/* Re-route interrupt to cluster 1 */
	ldr w4, =GICD_IROUTER_VALUE
	str w4, [x2, x0]
	str w4, [x2, x1]
	ldr x0, =GICD_IROUTER60_OFFSET
	ldr x1, =GICD_IROUTER76_OFFSET
	str w4, [x2, x0]
	str w4, [x2, x1]
	dsb sy
	isb

	/* x3 = pmu base addr */

	/*
	 * Disable sec, spi and qspi
	 * TBD - Check if eNETC needs to be disabled
	 */
	ldr x2, =NXP_DCFG_ADDR
	ldr x0, =DCFG_DEVDISR1_OFFSET
	ldr w1, =DCFG_DEVDISR1_SEC
	str w1, [x2, x0]

	ldr x0, =DCFG_DEVDISR4_OFFSET
	ldr w1, =DCFG_DEVDISR4_SPI_QSPI
	str w1, [x2, x0]

	/* x3 = pmu base addr */

	/* Set TPMWAKEMR0 */
	ldr x0, =TPMWAKEMR0_ADDR
	mov w1, #0x1
	str w1, [x0]

	/* Disable CCI snoop domain */
	ldr x0, =NXP_CCI_ADDR
	mov w1, #0x1
	str w1, [x0]

	/* Setup retention control */
	mrs x0, CPUECTLR_EL1
	orr x0, x0, #CPUECTLR_SMPEN_EN
	orr x0, x0, #CPUECTLR_TIMER_2TICKS
	msr CPUECTLR_EL1, x0
	dsb sy
	isb

	bl get_pmu_idle_core_mask
	mov x3, #NXP_PMU_ADDR
8:
	ldr w1, [x3, #PMU_PCPW20SR_OFFSET]
	cmp w1, w0
	b.ne 8b

	/* x3 = NXP_PMU_ADDR */
	/* 1 cluster SoC */

	bl get_pmu_idle_cluster_mask
	mov x3, #NXP_PMU_ADDR

	str w0, [x3, #PMU_CLAINACTSETR_OFFSET]

	bl get_pmu_idle_core_mask
	/* x3 = NXP_PMU_ADDR */
	mov x3, #NXP_PMU_ADDR
1:
	ldr w1, [x3, #PMU_PCPW20SR_OFFSET]
	cmp w1, w0
	b.ne 1b

	/* x3 = NXP_PMU_ADDR */
	bl get_pmu_flush_cluster_mask
	mov x3, #NXP_PMU_ADDR

	str w0, [x3, #PMU_CLL2FLUSHSETR_OFFSET]

	/* x3 = NXP_PMU_ADDR */
2:
	ldr w1, [x3, #PMU_CLL2FLUSHSR_OFFSET]
	cmp w1, w0
	b.ne 2b

	/* x3 = NXP_PMU_ADDR */

	str w0, [x3, #PMU_CLSL2FLUSHCLRR_OFFSET]

	str w0, [x3, #PMU_CLSINACTSETR_OFFSET]

	/* Force the debug interface to be quiescent */
	mrs x0, osdlr_el1
	orr x0, x0, #0x1
	msr osdlr_el1, x0

	/*
	 * Enable the WakeRequest signal (set GICR_WAKER.ProcessorSleep)
	 * x3 is the core mask, iterating from the last core down to core 0
	 */
	bl get_tot_num_cores
	sub x0, x0, #1
	mov x3, #0x1
	lsl x3, x3, x0
2:
	mov x0, x3
	bl get_gic_rd_base // 0-2
	ldr w1, [x0, #GICR_WAKER_OFFSET]
	orr w1, w1, #GICR_WAKER_SLEEP_BIT
	str w1, [x0, #GICR_WAKER_OFFSET]
1:
	ldr w1, [x0, #GICR_WAKER_OFFSET]
	cmp w1, #GICR_WAKER_ASLEEP
	b.ne 1b

	lsr x3, x3, #1
	cbnz x3, 2b

	/* Invalidate all TLB entries at all 3 exception levels */
	tlbi alle1
	tlbi alle2
	tlbi alle3

	/* Request lpm20 */
	mov x3, #NXP_PMU_ADDR
	ldr x0, =PMU_POWMGTCSR_OFFSET
	ldr w1, =PMU_POWMGTCSR_VAL
	str w1, [x3, x0]

	ldr x5, =NXP_EPU_ADDR
4:
	wfe
	ldr w1, [x5, #EPU_EPCTR10_OFFSET]
	cmp w1, #0
	b.eq 4b

	/* x3 = NXP_PMU_ADDR */

	bl get_pmu_idle_cluster_mask
	mov x3, #NXP_PMU_ADDR

	/* Re-enable the GPP ACP */
	str w0, [x3, #PMU_CLAINACTCLRR_OFFSET]
	str w0, [x3, #PMU_CLSINACTCLRR_OFFSET]

	/* x3 = NXP_PMU_ADDR */
3:
	ldr w1, [x3, #PMU_CLAINACTSETR_OFFSET]
	cbnz w1, 3b
4:
	ldr w1, [x3, #PMU_CLSINACTSETR_OFFSET]
	cbnz w1, 4b

	/*
	 * Disable the WakeRequest signal (clear GICR_WAKER.ProcessorSleep)
	 * on each core
	 * x3 is the core mask, iterating from the last core down to core 0
	 */
	bl get_tot_num_cores
	sub x0, x0, #1
	mov x3, #0x1
	lsl x3, x3, x0
2:
	mov x0, x3
	bl get_gic_rd_base // 0-2
	ldr w1, [x0, #GICR_WAKER_OFFSET]
	bic w1, w1, #GICR_WAKER_SLEEP_BIT
	str w1, [x0, #GICR_WAKER_OFFSET]
1:
	ldr w1, [x0, #GICR_WAKER_OFFSET]
	cbnz w1, 1b

	lsr x3, x3, #1
	cbnz x3, 2b

	/* Enable CCI snoop domain */
	ldr x0, =NXP_CCI_ADDR
	str wzr, [x0]
	dsb sy
	isb

	ldr x3, =NXP_EPU_ADDR

	/* x3 = epu base addr */

	/* Enable sec, enetc, spi and qspi */
	ldr x2, =NXP_DCFG_ADDR
	str wzr, [x2, #DCFG_DEVDISR1_OFFSET]
	str wzr, [x2, #DCFG_DEVDISR2_OFFSET]
	str wzr, [x2, #DCFG_DEVDISR4_OFFSET]

	/* Restore flextimer/mmc/usb interrupt router */
	ldr x3, =NXP_GICD_ADDR
	ldp x0, x2, [sp], #16
	ldr x1, =GICD_IROUTER113_OFFSET
	str w2, [x3, x1]
	ldr x1, =GICD_IROUTER112_OFFSET
	str w0, [x3, x1]
	ldp x0, x2, [sp], #16
	ldr x1, =GICD_IROUTER76_OFFSET
	str w2, [x3, x1]
	ldr x1, =GICD_IROUTER60_OFFSET
	str w0, [x3, x1]

	/* Restore EPU registers */
	ldr x3, =NXP_EPU_ADDR
	ldp x0, x2, [sp], #16
	str w2, [x3, #EPU_EPGCR_OFFSET]
	str w0, [x3, #EPU_EPCTR10_OFFSET]
	ldp x2, x1, [sp], #16
	str w1, [x3, #EPU_EPCCR10_OFFSET]
	str w2, [x3, #EPU_EPIMCR10_OFFSET]

	dsb sy
	isb
	mov x30, x18
	ret

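/*
 * Note (restore-order detail, inferred from the pushes above): the pops
 * mirror the pushes in LIFO order -- the IROUTER112/113 pair comes off
 * the stack first, then the IROUTER60/76 pair, then the two EPU pairs,
 * which is why the restore sequence walks the registers in the reverse
 * of the order in which they were saved.
 */
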
/*
 * Part of CPU_SUSPEND
 *
 * This function performs any SoC-specific cleanup after power-down
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_sys_exit_pwrdn:
	/* Set SCTLR_EL1.C (bit 2) to re-enable the EL1 data cache */
	mrs x1, SCTLR_EL1
	orr x1, x1, #0x4
	msr SCTLR_EL1, x1

	/* Enable debug interface */
	mrs x1, osdlr_el1
	bic x1, x1, #OSDLR_EL1_DLK_LOCK
	msr osdlr_el1, x1

	/* Enable i-cache */
	mrs x1, SCTLR_EL3
	orr x1, x1, #SCTLR_I_MASK
	msr SCTLR_EL3, x1

	isb
	ret

/*
 * This function sets up the TrustZone Protection Controller (TZPC)
 * in: none
 * out: none
 * uses x0, x1
 */
init_tzpc:
	/* Set Non Secure access for all devices protected via TZPC */
	ldr x1, =TZPCDECPROT_0_SET_BASE /* decode Protection-0 Set Reg */
	mov w0, #0xFF /* set decode region to NS, Bits[7:0] */
	str w0, [x1]

	ldr x1, =TZPCDECPROT_1_SET_BASE /* decode Protection-1 Set Reg */
	mov w0, #0xFF /* set decode region to NS, Bits[7:0] */
	str w0, [x1]

	ldr x1, =TZPCDECPROT_2_SET_BASE /* decode Protection-2 Set Reg */
	mov w0, #0xFF /* set decode region to NS, Bits[7:0] */
	str w0, [x1]

	/* entire SRAM as NS */
	ldr x1, =NXP_OCRAM_TZPC_ADDR /* secure RAM region size Reg */
	mov w0, #0x00000000 /* 0x00000000 = no secure region */
	str w0, [x1]

	ret

/*
 * This function performs any needed initialization on SecMon for
 * boot services
 */
initSecMon:
	/* Read the register hpcomr */
	ldr x1, =NXP_SNVS_ADDR
	ldr w0, [x1, #SECMON_HPCOMR_OFFSET]
	/* Turn off secure access for the privileged registers */
	orr w0, w0, #SECMON_HPCOMR_NPSWAEN
	/* Write back */
	str w0, [x1, #SECMON_HPCOMR_OFFSET]

	ret

/*
 * This function checks to see if cores which are to be disabled have been
 * released from reset - if not, it releases them
 * in: none
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8
 */
release_disabled:
	stp x18, x30, [sp, #-16]!

	/*
	 * Get the number of cpus on this device, by calling the C
	 * function below. There is no need to preserve x9-x15 here,
	 * as they are not live across the call.
	 */
	bl get_tot_num_cores
	mov x6, x0

	/* Read COREDISABLEDSR */
	mov x0, #NXP_DCFG_ADDR
	ldr w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]

	mov x0, #NXP_RESET_ADDR
	ldr w5, [x0, #BRR_OFFSET]

	/* Load the core mask for the first core */
	mov x7, #1

	/*
	 * x4 = COREDISABLEDSR
	 * x5 = BRR
	 * x6 = loop count
	 * x7 = core mask bit
	 */
2:
	/* Check if the core is to be disabled */
	tst x4, x7
	b.eq 1f

	/* See if disabled cores have already been released from reset */
	tst x5, x7
	b.ne 1f

	/* If core has not been released, then release it (0-3) */
	mov x0, x7
	bl _soc_core_release

	/* Record the core state in the data area (0-3) */
	mov x0, x7
	mov x1, #CORE_DISABLED
	bl _setCoreState
1:
	/* Decrement the counter */
	subs x6, x6, #1
	b.le 3f
	/* Shift the core mask to the next core */
	lsl x7, x7, #1
	/* Continue */
	b 2b
3:
	ldp x18, x30, [sp], #16
	ret

/*
 * Write a register in the DCFG block
 * in: x0 = offset
 * in: w1 = value to write
 * uses x0, x1, x2
 */
_write_reg_dcfg:
	ldr x2, =NXP_DCFG_ADDR
	str w1, [x2, x0]
	ret

/*
 * Read a register in the DCFG block
 * in: x0 = offset
 * out: w0 = value read
 * uses x0, x1, x2
 */
_read_reg_dcfg:
	ldr x2, =NXP_DCFG_ADDR
	ldr w1, [x2, x0]
	mov w0, w1
	ret

/*
 * This function returns an mpidr value for a core, given a core_mask_lsb
 * in: x0 = core mask lsb
 * out: x0 = affinity2:affinity1:affinity0, where affinity is 8-bits
 * uses x0, x1
 */
get_mpidr_value:
	/* Convert a core mask to an SoC core number */
	clz w0, w0
	mov w1, #31
	sub w0, w1, w0

	/* Get the mpidr core number from the SoC core number */
	mov w1, wzr
	tst x0, #1
	b.eq 1f
	orr w1, w1, #1
1:
	/* Extract the cluster number */
	lsr w0, w0, #1
	orr w0, w1, w0, lsl #8

	ret

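/*
 * Worked example (illustrative; assumes the 2-cores-per-cluster mapping
 * encoded above): core mask 0x8 -> SoC core number 3 -> cpu 1 within
 * cluster 1 -> mpidr value 0x101 (Aff1 = 1, Aff0 = 1). In C terms:
 *   mpidr = ((n >> 1) << 8) | (n & 1);
 */
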
/*
 * This function returns the redistributor base address for the core
 * specified by x0
 * in: x0 - core mask lsb of specified core
 * out: x0 = redistributor rd base address for specified core
 * uses x0, x1, x2
 */
get_gic_rd_base:
	/* Get the 0-based core number */
	clz w1, w0
	mov w2, #0x20
	sub w2, w2, w1
	sub w2, w2, #1

	/* x2 = core number / loop counter */
	ldr x0, =NXP_GICR_ADDR
	mov x1, #GIC_RD_OFFSET
2:
	cbz x2, 1f
	add x0, x0, x1
	sub x2, x2, #1
	b 2b
1:
	ret

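/*
 * Note (illustrative): the loop above simply computes
 *   NXP_GICR_ADDR + core_number * GIC_RD_OFFSET
 * without using a multiply; get_gic_sgi_base below does the same with
 * NXP_GICR_SGI_ADDR and GIC_SGI_OFFSET.
 */
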
/*
 * This function returns the redistributor base address for the core
 * specified by x0
 * in: x0 - core mask lsb of specified core
 * out: x0 = redistributor sgi base address for specified core
 * uses x0, x1, x2
 */
get_gic_sgi_base:
	/* Get the 0-based core number */
	clz w1, w0
	mov w2, #0x20
	sub w2, w2, w1
	sub w2, w2, #1

	/* x2 = core number / loop counter */
	ldr x0, =NXP_GICR_SGI_ADDR
	mov x1, #GIC_SGI_OFFSET
2:
	cbz x2, 1f
	add x0, x0, x1
	sub x2, x2, #1
	b 2b
1:
	ret

/*
 * Write a register in the RESET block
 * in: x0 = offset
 * in: w1 = value to write
 * uses x0, x1, x2
 */
_write_reg_reset:
	ldr x2, =NXP_RESET_ADDR
	str w1, [x2, x0]
	ret

/*
 * Read a register in the RESET block
 * in: x0 = offset
 * out: w0 = value read
 * uses x0, x1
 */
_read_reg_reset:
	ldr x1, =NXP_RESET_ADDR
	ldr w0, [x1, x0]
	ret