/*
 * Copyright 2018-2021 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
7#include <asm_macros.S>
8#include <cortex_a53.h>
9#include <dcfg_lsch2.h>
10#include <plat_gic.h>
11#include <scfg.h>
12
13#include <bl31_data.h>
14#include <plat_psci.h>
15#include <platform_def.h>
16
/* the BASE address for these offsets is AUX_01_DATA in the */
/* bootcore's psci data region */
#define DEVDISR2_MASK_OFFSET	0x0	/* references AUX_01_DATA */
#define DEVDISR5_MASK_OFFSET	0x8	/* references AUX_02_DATA */
#define CPUACTLR_DATA_OFFSET	0x10	/* references AUX_03_DATA */
/* the BASE address for these offsets is AUX_04_DATA in the */
/* bootcore's psci data region */
#define GICD_BASE_ADDR_OFFSET	0x0	/* references AUX_04_DATA */
#define GICC_BASE_ADDR_OFFSET	0x8	/* references AUX_05_DATA */

#define DAIF_DATA	AUX_06_DATA	/* references AUX_06_DATA */

/* retry counts for the register-poll loops below */
#define IPSTPACK_RETRY_CNT	0x10000
#define DDR_SLEEP_RETRY_CNT	0x10000
/* sysreg encoding of the implementation-defined A53 CPU auxiliary control reg */
#define CPUACTLR_EL1	S3_1_C15_C2_0
#define DDR_SDRAM_CFG_2_FRCSR	0x80000000
#define DDR_SDRAM_CFG_2_OFFSET	0x114
#define DDR_TIMING_CFG_4_OFFSET	0x160
#define DDR_CNTRL_BASE_ADDR	0x01080000

/* DDR DLL lock field in TIMING_CFG_4 */
#define DLL_LOCK_MASK	0x3
#define DLL_LOCK_VALUE	0x2

/* error return codes */
#define ERROR_DDR_SLEEP	-1
#define ERROR_DDR_WAKE	-2
#define ERROR_NO_QUIESCE	-3

/* values passed in x9 to final_pwrdown */
#define CORE_RESTARTABLE	0
#define CORE_NOT_RESTARTABLE	1

#define RESET_RETRY_CNT	800

.global soc_init_lowlevel
.global soc_init_percpu
.global _soc_core_release
.global _soc_core_restart
.global _soc_ck_disabled
.global _soc_sys_reset
.global _soc_sys_off
.global _getGICD_BaseAddr
.global _getGICC_BaseAddr
.global _soc_set_start_addr
.global _soc_core_prep_off
.global _soc_core_entr_off
.global _soc_core_exit_off
.global _soc_core_prep_stdby
.global _soc_core_entr_stdby
.global _soc_core_exit_stdby
.global _soc_core_prep_pwrdn
.global _soc_core_entr_pwrdn
.global _soc_core_exit_pwrdn
.global _soc_clstr_prep_stdby
.global _soc_clstr_exit_stdby
.global _soc_clstr_prep_pwrdn
.global _soc_clstr_exit_pwrdn
.global _soc_sys_prep_stdby
.global _soc_sys_exit_stdby
.global _soc_sys_prep_pwrdn
.global _soc_sys_pwrdn_wfi
.global _soc_sys_exit_pwrdn
77
/*
 * This function initialize the soc.
 * (no-op on this platform)
 * in:  void
 * out: void
 */
func soc_init_lowlevel
	ret
endfunc soc_init_lowlevel
86
/*
 * void soc_init_percpu(void)
 * this function performs any soc-specific initialization that is needed on
 * a per-core basis
 * in: none
 * out: none
 * uses x0, x1, x2, x3
 */
func soc_init_percpu
	mov	x3, x30			/* preserve LR across the calls below */

	bl	plat_my_core_mask
	mov	x2, x0			/* x2 = core mask (lsb) of this core */

	/* see if this core is marked for prefetch disable */
	mov	x0, #PREFETCH_DIS_OFFSET
	bl	_get_global_data	/* 0-1 */
	tst	x0, x2
	b.eq	1f			/* bit clear - leave prefetch enabled */
	bl	_disable_ldstr_pfetch_A53	/* 0 */
1:
	mov	x30, x3
	ret
endfunc soc_init_percpu
111
/*
 * part of CPU_ON
 * this function releases a secondary core from reset
 * in:  x0 = core_mask_lsb
 * out: none
 * uses: x0, x1, x2, x3
 */
_soc_core_release:

#if (TEST_BL31)
	mov	w2, w0
	CoreMaskMsb w2, w3
	/* x2 = core mask msb */
#else
	mov	x2, x0
#endif
	/* write COREBCR - the SCFG registers are big-endian, hence the rev */
	ldr	x1, =NXP_SCFG_ADDR
	rev	w3, w2
	str	w3, [x1, #SCFG_COREBCR_OFFSET]
	isb

	/* read-modify-write BRR to set the boot-release bit for this core */
	mov	x1, #NXP_DCFG_ADDR
	ldr	w2, [x1, #DCFG_BRR_OFFSET]
	rev	w3, w2			/* swap BE value to LE for the orr */
	orr	w3, w3, w0
	rev	w2, w3			/* swap back to BE for the write */
	str	w2, [x1, #DCFG_BRR_OFFSET]
	isb

	/* send event to wake cores waiting in wfe */
	sev
	isb
	ret
147
148
/*
 * part of CPU_ON
 * this function restarts a core shutdown via _soc_core_entr_off
 * (the target core is parked in wfi waiting for SGI 15)
 * in:  x0 = core mask lsb (of the target cpu)
 * out: x0 == 0, on success
 *      x0 != 0, on failure
 * uses x0 ~ x5
 */
_soc_core_restart:
	mov	x5, x30			/* preserve LR */
	mov	x3, x0

	/* x3 = core mask lsb */
	bl	_getGICD_BaseAddr
	mov	x4, x0

	/* x4 = GICD_BASE_ADDR */
	/* enable forwarding of group 0 interrupts by setting GICD_CTLR[0] = 1 */
	ldr	w1, [x4, #GICD_CTLR_OFFSET]
	orr	w1, w1, #GICD_CTLR_EN_GRP0
	str	w1, [x4, #GICD_CTLR_OFFSET]
	dsb	sy
	isb

	/*
	 * fire SGI by writing to GICD_SGIR the following values:
	 * [25:24] = 0x0 (forward interrupt to the CPU interfaces specified in CPUTargetList field)
	 * [23:16] = core mask lsb[7:0] (forward interrupt to target cpu)
	 * [15] = 0 (forward SGI only if it is configured as group 0 interrupt)
	 * [3:0] = 0xF (interrupt ID = 15)
	 */
	lsl	w1, w3, #16
	orr	w1, w1, #0xF
	str	w1, [x4, #GICD_SGIR_OFFSET]
	dsb	sy
	isb

	/* load '0' on success */
	mov	x0, xzr

	mov	x30, x5
	ret
191
/*
 * this function determines if a core is disabled via COREDISR
 * in:  w0 = core_mask_lsb
 * out: w0 = 0, core not disabled
 *      w0 != 0, core disabled
 * uses x0, x1, x2
 */
_soc_ck_disabled:

	/* get base addr of dcfg block */
	ldr	x1, =NXP_DCFG_ADDR

	/* read COREDISR (big-endian register) */
	ldr	w1, [x1, #DCFG_COREDISR_OFFSET]
	rev	w2, w1			/* swap to little-endian */

	/* test core bit - result in w0 doubles as the return value */
	and	w0, w2, w0
	ret
211
/*
 * this function resets the system via SoC-specific methods
 * Note: this function does not return
 * in: none
 * out: none
 * uses x0, x1, x2, x3
 */
_soc_sys_reset:

	ldr	x2, =NXP_DCFG_ADDR

	/* make sure the mask is cleared in the reset request mask register */
	mov	w1, wzr
	str	w1, [x2, #DCFG_RSTRQMR1_OFFSET]

	/* x2 = NXP_DCFG_ADDR */

	/* set the reset request (RSTCR is big-endian, hence the rev) */
	ldr	w1, =RSTCR_RESET_REQ
	ldr	x3, =DCFG_RSTCR_OFFSET
	rev	w0, w1
	str	w0, [x2, x3]

	/* x2 = NXP_DCFG_ADDR */
	/* x3 = DCFG_RSTCR_OFFSET */

	/* just in case this address range is mapped as cacheable,
	 * flush the write out of the dcaches */
	add	x3, x2, x3
	dc	cvac, x3
	dsb	st
	isb

	/* Note: this function does not return - spin until the reset lands */
1:
	wfi
	b	1b
248
249
/*
 * part of SYSTEM_OFF
 * this function turns off the SoC clocks
 * Note: this function is not intended to return, and the only allowable
 * recovery is POR
 * in: none
 * out: none
 * uses x0 ~ x8
 */
_soc_sys_off:

	/* mask interrupts at the core */
	mrs	x1, DAIF
	mov	x0, #DAIF_SET_MASK
	orr	x0, x1, x0
	msr	DAIF, x0

	/* disable icache, dcache, mmu @ EL1 */
	mov	x1, #SCTLR_I_C_M_MASK
	mrs	x0, sctlr_el1
	bic	x0, x0, x1
	msr	sctlr_el1, x0

	/* disable dcache for EL3 */
	mrs	x1, SCTLR_EL3
	bic	x1, x1, #SCTLR_C_MASK
	/* make sure icache is enabled */
	orr	x1, x1, #SCTLR_I_MASK
	msr	SCTLR_EL3, x1
	isb

	/* set WFIL2_EN in SCFG_COREPMCR */
	ldr	x0, =SCFG_COREPMCR_OFFSET
	ldr	x1, =COREPMCR_WFIL2
	bl	write_reg_scfg

	/* set OVRD_EN in RCPM2_POWMGTDCR */
	ldr	x0, =RCPM2_POWMGTDCR_OFFSET
	ldr	x1, =POWMGTDCR_OVRD_EN
	bl	write_reg_rcpm2

	/* read IPPDEXPCR0 @ RCPM_IPPDEXPCR0 */
	ldr	x0, =RCPM_IPPDEXPCR0_OFFSET
	bl	read_reg_rcpm
	mov	x7, x0

	/* build an override mask for IPSTPCR4/IPSTPACK4/DEVDISR5 */
	mov	x5, xzr
	ldr	x6, =IPPDEXPCR_MASK2
	and	x6, x6, x7
	cbz	x6, 1f

	/* x5 = override mask
	 * x6 = IPPDEXPCR bits for DEVDISR5
	 * x7 = IPPDEXPCR */

	/* get the overrides: for each IP kept powered in IPPDEXPCR,
	 * set the corresponding DEVDISR5 bit in the override mask */
	orr	x4, x5, #DEVDISR5_I2C_1
	tst	x6, #IPPDEXPCR_I2C1
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR5_LPUART1
	tst	x6, #IPPDEXPCR_LPUART1
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR5_FLX_TMR
	tst	x6, #IPPDEXPCR_FLX_TMR1
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR5_OCRAM1
	tst	x6, #IPPDEXPCR_OCRAM1
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR5_GPIO
	tst	x6, #IPPDEXPCR_GPIO1
	csel	x5, x5, x4, EQ
1:
	/* store the DEVDISR5 override mask */
	ldr	x2, =BC_PSCI_BASE
	add	x2, x2, #AUX_01_DATA
	str	w5, [x2, #DEVDISR5_MASK_OFFSET]

	/* build an override mask for IPSTPCR1/IPSTPACK1/DEVDISR2 */
	mov	x5, xzr
	ldr	x6, =IPPDEXPCR_MASK1
	and	x6, x6, x7
	cbz	x6, 2f

	/* x5 = override mask */
	/* x6 = IPPDEXPCR bits for DEVDISR2 */

	/* get the overrides */
	orr	x4, x5, #DEVDISR2_FMAN1_MAC1
	tst	x6, #IPPDEXPCR_MAC1_1
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1_MAC2
	tst	x6, #IPPDEXPCR_MAC1_2
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1_MAC3
	tst	x6, #IPPDEXPCR_MAC1_3
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1_MAC4
	tst	x6, #IPPDEXPCR_MAC1_4
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1_MAC5
	tst	x6, #IPPDEXPCR_MAC1_5
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1_MAC6
	tst	x6, #IPPDEXPCR_MAC1_6
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1_MAC9
	tst	x6, #IPPDEXPCR_MAC1_9
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1
	tst	x6, #IPPDEXPCR_FM1
	csel	x5, x5, x4, EQ

2:
	/* store the DEVDISR2 override mask */
	ldr	x2, =BC_PSCI_BASE
	add	x2, x2, #AUX_01_DATA
	str	w5, [x2, #DEVDISR2_MASK_OFFSET]

	/* x5 = DEVDISR2 override mask */

	/* write IPSTPCR0 - no overrides */
	ldr	x0, =RCPM2_IPSTPCR0_OFFSET
	ldr	x1, =IPSTPCR0_VALUE
	bl	write_reg_rcpm2

	/* x5 = DEVDISR2 override mask */

	/* write IPSTPCR1 - overrides possible */
	ldr	x0, =RCPM2_IPSTPCR1_OFFSET
	ldr	x1, =IPSTPCR1_VALUE
	bic	x1, x1, x5
	bl	write_reg_rcpm2

	/* write IPSTPCR2 - no overrides */
	ldr	x0, =RCPM2_IPSTPCR2_OFFSET
	ldr	x1, =IPSTPCR2_VALUE
	bl	write_reg_rcpm2

	/* write IPSTPCR3 - no overrides */
	ldr	x0, =RCPM2_IPSTPCR3_OFFSET
	ldr	x1, =IPSTPCR3_VALUE
	bl	write_reg_rcpm2

	/* write IPSTPCR4 - overrides possible */
	ldr	x2, =BC_PSCI_BASE
	add	x2, x2, #AUX_01_DATA
	ldr	w6, [x2, #DEVDISR5_MASK_OFFSET]
	ldr	x0, =RCPM2_IPSTPCR4_OFFSET
	ldr	x1, =IPSTPCR4_VALUE
	bic	x1, x1, x6
	bl	write_reg_rcpm2

	/* x5 = DEVDISR2 override mask */
	/* x6 = DEVDISR5 override mask */

	/* poll on IPSTPACK0 until the stop request is acknowledged,
	 * or the retry count is exhausted */
	ldr	x3, =RCPM2_IPSTPACKR0_OFFSET
	ldr	x4, =IPSTPCR0_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
3:
	mov	x0, x3
	bl	read_reg_rcpm2
	cmp	x0, x4
	b.eq	14f
	sub	x7, x7, #1
	cbnz	x7, 3b

14:
	/* poll on IPSTPACK1 */
	ldr	x3, =IPSTPCR1_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
	bic	x4, x3, x5		/* expected ack = value minus overrides */
	ldr	x3, =RCPM2_IPSTPACKR1_OFFSET
4:
	mov	x0, x3
	bl	read_reg_rcpm2
	cmp	x0, x4
	b.eq	15f
	sub	x7, x7, #1
	cbnz	x7, 4b

15:
	/* poll on IPSTPACK2 */
	ldr	x3, =RCPM2_IPSTPACKR2_OFFSET
	ldr	x4, =IPSTPCR2_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
5:
	mov	x0, x3
	bl	read_reg_rcpm2
	cmp	x0, x4
	b.eq	16f
	sub	x7, x7, #1
	cbnz	x7, 5b

16:
	/* poll on IPSTPACK3 */
	ldr	x3, =RCPM2_IPSTPACKR3_OFFSET
	ldr	x4, =IPSTPCR3_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
6:
	mov	x0, x3
	bl	read_reg_rcpm2
	cmp	x0, x4
	b.eq	17f
	sub	x7, x7, #1
	cbnz	x7, 6b

17:
	/* poll on IPSTPACK4 */
	ldr	x3, =IPSTPCR4_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
	bic	x4, x3, x6		/* expected ack = value minus overrides */
	ldr	x3, =RCPM2_IPSTPACKR4_OFFSET
7:
	mov	x0, x3
	bl	read_reg_rcpm2
	cmp	x0, x4
	b.eq	18f
	sub	x7, x7, #1
	cbnz	x7, 7b

18:
	ldr	x7, =BC_PSCI_BASE
	add	x7, x7, #AUX_01_DATA

	/* x5 = DEVDISR2 override mask
	 * x6 = DEVDISR5 override mask
	 * x7 = [soc_data_area] */

	/* DEVDISR1 - load new value */
	mov	x0, #DCFG_DEVDISR1_OFFSET
	bl	read_reg_dcfg
	mov	x0, #DCFG_DEVDISR1_OFFSET
	ldr	x1, =DEVDISR1_VALUE
	bl	write_reg_dcfg

	/* DEVDISR2 - load new value */
	mov	x0, #DCFG_DEVDISR2_OFFSET
	bl	read_reg_dcfg
	mov	x0, #DCFG_DEVDISR2_OFFSET
	ldr	x1, =DEVDISR2_VALUE
	bic	x1, x1, x5
	bl	write_reg_dcfg

	/* x6 = DEVDISR5 override mask */
	/* x7 = [soc_data_area] */

	/* DEVDISR3 - load new value */
	mov	x0, #DCFG_DEVDISR3_OFFSET
	bl	read_reg_dcfg
	mov	x0, #DCFG_DEVDISR3_OFFSET
	ldr	x1, =DEVDISR3_VALUE
	bl	write_reg_dcfg

	/* DEVDISR4 - load new value */
	mov	x0, #DCFG_DEVDISR4_OFFSET
	bl	read_reg_dcfg
	mov	x0, #DCFG_DEVDISR4_OFFSET
	ldr	x1, =DEVDISR4_VALUE
	bl	write_reg_dcfg

	/* DEVDISR5 - load new value */
	mov	x0, #DCFG_DEVDISR5_OFFSET
	bl	read_reg_dcfg
	mov	x0, #DCFG_DEVDISR5_OFFSET
	ldr	x1, =DEVDISR5_VALUE
	bic	x1, x1, x6
	bl	write_reg_dcfg

	/* x7 = [soc_data_area] */

	/* disable data prefetch */
	mrs	x0, CPUACTLR_EL1
	bic	x0, x0, #CPUACTLR_L1PCTL_MASK
	msr	CPUACTLR_EL1, x0

	/* x6 = DEVDISR5 override mask */

	/* setup registers for cache-only execution */
	ldr	x5, =IPSTPCR4_VALUE
	bic	x5, x5, x6
	mov	x6, #DDR_CNTRL_BASE_ADDR
	mov	x7, #DCSR_RCPM2_BASE
	mov	x8, #NXP_DCFG_ADDR
	dsb	sy
	isb

	/* set the DLL_LOCK cycle count (DDR regs are big-endian) */
	ldr	w1, [x6, #DDR_TIMING_CFG_4_OFFSET]
	rev	w2, w1
	bic	w2, w2, #DLL_LOCK_MASK
	orr	w2, w2, #DLL_LOCK_VALUE
	rev	w1, w2
	str	w1, [x6, #DDR_TIMING_CFG_4_OFFSET]

	/* x5 = ipstpcr4 (IPSTPCR4_VALUE bic DEVDISR5_MASK)
	 * x6 = DDR_CNTRL_BASE_ADDR
	 * x7 = DCSR_RCPM2_BASE
	 * x8 = NXP_DCFG_ADDR */

	/* enter the cache-only sequence - there is no return */
	b	final_shutdown
564
565
/*
 * part of CPU_OFF
 * this function programs SoC & GIC registers in preparation for shutting down
 * the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0 ~ x7
 */
_soc_core_prep_off:
	mov	x7, x30			/* preserve LR */
	mov	x6, x0			/* x6 = core mask */

	/* make sure the smpen bit is set */
	mrs	x2, CORTEX_A53_ECTLR_EL1
	orr	x2, x2, #CPUECTLR_SMPEN_MASK
	msr	CORTEX_A53_ECTLR_EL1, x2
	isb

	/* configure the cpu interface */

	/* disable signaling of ints */
	bl	_getGICC_BaseAddr	/* 0-1 */
	mov	x4, x0

	ldr	w3, [x4, #GICC_CTLR_OFFSET]
	bic	w3, w3, #GICC_CTLR_EN_GRP0
	bic	w3, w3, #GICC_CTLR_EN_GRP1
	str	w3, [x4, #GICC_CTLR_OFFSET]
	dsb	sy
	isb

	/*
	 * x3 = GICC_CTRL
	 * x4 = GICC_BASE_ADDR
	 * x6 = core mask
	 */

	/* set the priority filter */
	ldr	w2, [x4, #GICC_PMR_OFFSET]
	orr	w2, w2, #GICC_PMR_FILTER
	str	w2, [x4, #GICC_PMR_OFFSET]

	/* setup GICC_CTLR */
	bic	w3, w3, #GICC_CTLR_ACKCTL_MASK
	orr	w3, w3, #GICC_CTLR_FIQ_EN_MASK
	orr	w3, w3, #GICC_CTLR_EOImodeS_MASK
	orr	w3, w3, #GICC_CTLR_CBPR_MASK
	str	w3, [x4, #GICC_CTLR_OFFSET]

	/* x3 = GICC_CTRL */
	/* x4 = GICC_BASE_ADDR */

	/* setup the banked-per-core GICD registers */
	bl	_getGICD_BaseAddr

	/*
	 * x0 = GICD_BASE_ADDR
	 * x3 = GICC_CTRL
	 * x4 = GICC_BASE_ADDR
	 * x6 = core mask
	 */

	/* define SGI15 as Grp0 */
	ldr	w2, [x0, #GICD_IGROUPR0_OFFSET]
	bic	w2, w2, #GICD_IGROUP0_SGI15
	str	w2, [x0, #GICD_IGROUPR0_OFFSET]

	/* set priority of SGI 15 to highest... */
	ldr	w2, [x0, #GICD_IPRIORITYR3_OFFSET]
	bic	w2, w2, #GICD_IPRIORITY_SGI15_MASK
	str	w2, [x0, #GICD_IPRIORITYR3_OFFSET]

	/* enable SGI 15 */
	ldr	w2, [x0, #GICD_ISENABLER0_OFFSET]
	orr	w2, w2, #GICD_ISENABLE0_SGI15
	str	w2, [x0, #GICD_ISENABLER0_OFFSET]

	/* enable the cpu interface */
	orr	w3, w3, #GICC_CTLR_EN_GRP0
	str	w3, [x4, #GICC_CTLR_OFFSET]

	/* x0 = GICD_BASE_ADDR
	 * x6 = core mask */

	/* clear any pending SGIs */
	add	x0, x0, #GICD_CPENDSGIR3_OFFSET
	ldr	x2, =GICD_CPENDSGIR_CLR_MASK
	str	w2, [x0]

	dsb	sy
	isb
	mov	x30, x7
	ret
659
/*
 * part of CPU_OFF
 * this function performs the final steps to shutdown the core
 * (the core parks in wfi until woken by SGI 15 and marked CORE_WAKEUP)
 * in:  x0 = core mask lsb
 * out: none
 * uses x0 ~ x5
 */
_soc_core_entr_off:
	mov	x5, x30			/* preserve LR */
	mov	x4, x0

	bl	_getGICD_BaseAddr
	mov	x3, x0

	/* x3 = GICD_BASE_ADDR */
	/* x4 = core mask (lsb) */

3:
	/* enter low-power state by executing wfi */
	wfi

	/* x3 = GICD_BASE_ADDR */
	/* x4 = core mask (lsb) */

	/* see if we got hit by SGI 15 */
	add	x0, x3, #GICD_SPENDSGIR3_OFFSET
	ldr	w2, [x0]
	and	w2, w2, #GICD_SPENDSGIR3_SGI15_MASK
	cbz	w2, 4f

	/* clear the pending SGI */
	ldr	x2, =GICD_CPENDSGIR_CLR_MASK
	add	x0, x3, #GICD_CPENDSGIR3_OFFSET
	str	w2, [x0]
4:
	/* check if core has been turned on */
	mov	x0, x4
	bl	_getCoreState

	/* x0 = core state - loop back into wfi on a spurious wakeup */
	cmp	x0, #CORE_WAKEUP
	b.ne	3b

	/* if we get here, then we have exited the wfi */
	dsb	sy
	isb
	mov	x30, x5
	ret
708
/*
 * part of CPU_OFF
 * this function starts the process of starting a core back up
 * (acknowledges and completes the SGI 15 that woke the core)
 * in:  x0 = core mask lsb
 * out: none
 * uses x0 ~ x5
 */
_soc_core_exit_off:
	mov	x5, x30			/* preserve LR */
	mov	x4, x0

	/* x4 = core mask */

	bl	_getGICC_BaseAddr
	mov	x2, x0

	/* read GICC_IAR - acknowledge the interrupt */
	ldr	w0, [x2, #GICC_IAR_OFFSET]

	/* write GICC_EIOR - signal end-of-interrupt */
	str	w0, [x2, #GICC_EOIR_OFFSET]

	/* write GICC_DIR - disable interrupt */
	str	w0, [x2, #GICC_DIR_OFFSET]

	/* x2 = GICC_BASE_ADDR */

	/* disable signaling of grp0 ints */
	ldr	w1, [x2, #GICC_CTLR_OFFSET]
	bic	w1, w1, #GICC_CTLR_EN_GRP0
	str	w1, [x2, #GICC_CTLR_OFFSET]

	dsb	sy
	isb
	mov	x30, x5
	ret
745
/*
 * this function loads a 64-bit execution address of the core in the soc registers
 * BOOTLOCPTRL/H
 * in: x0, 64-bit address to write to BOOTLOCPTRL/H
 * uses x0, x1, x2, x3
 */
_soc_set_start_addr:
	/* get the 64-bit base address of the scfg block */
	ldr	x2, =NXP_SCFG_ADDR

	/* write the 32-bit BOOTLOCPTRL register (offset 0x604 in the scfg block)
	 * - low half of the address; registers are big-endian, hence the rev */
	mov	x1, x0
	rev	w3, w1
	str	w3, [x2, #SCFG_BOOTLOCPTRL_OFFSET]

	/* write the 32-bit BOOTLOCPTRH register (offset 0x600 in the scfg block)
	 * - high half of the address */
	lsr	x1, x0, #32
	rev	w3, w1
	str	w3, [x2, #SCFG_BOOTLOCPTRH_OFFSET]
	ret
766
/*
 * part of CPU_SUSPEND
 * this function puts the calling core into standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
_soc_core_entr_stdby:
	/* ensure all prior accesses complete before entering standby */
	dsb	sy
	isb
	wfi

	ret
780
/*
 * part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_core_prep_stdby:
	/* clear CORTEX_A53_ECTLR_EL1[2:0] (the timer-retention control bits) */
	mrs	x1, CORTEX_A53_ECTLR_EL1
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	msr	CORTEX_A53_ECTLR_EL1, x1

	ret
795
/*
 * part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after standby state
 * (no-op on this platform)
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_core_exit_stdby:
	ret
805
/*
 * part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_core_prep_pwrdn:
	/* make sure the smp bit is set */
	mrs	x1, CORTEX_A53_ECTLR_EL1
	orr	x1, x1, #CPUECTLR_SMPEN_MASK
	msr	CORTEX_A53_ECTLR_EL1, x1
	isb

	ret
821
/*
 * part of CPU_SUSPEND
 * this function puts the calling core into a power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
_soc_core_entr_pwrdn:
	/* ensure all prior accesses complete before entering power-down */
	dsb	sy
	isb
	wfi

	ret
835
/*
 * part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after power-down
 * (no-op on this platform)
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_core_exit_pwrdn:
	ret
845
846
/*
 * part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to standby
 * (cluster-level; same programming as the core-level variant)
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_clstr_prep_stdby:
	/* clear CORTEX_A53_ECTLR_EL1[2:0] (the timer-retention control bits) */
	mrs	x1, CORTEX_A53_ECTLR_EL1
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	msr	CORTEX_A53_ECTLR_EL1, x1

	ret
861
/*
 * part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after standby state
 * (no-op on this platform)
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_clstr_exit_stdby:
	ret
871
/*
 * part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to power-down
 * (cluster-level; same programming as the core-level variant)
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_clstr_prep_pwrdn:
	/* make sure the smp bit is set */
	mrs	x1, CORTEX_A53_ECTLR_EL1
	orr	x1, x1, #CPUECTLR_SMPEN_MASK
	msr	CORTEX_A53_ECTLR_EL1, x1
	isb

	ret
887
/*
 * part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after power-down
 * (no-op on this platform)
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_clstr_exit_pwrdn:
	ret
897
/*
 * part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to standby
 * (system-level; same programming as the core-level variant)
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_sys_prep_stdby:
	/* clear CORTEX_A53_ECTLR_EL1[2:0] (the timer-retention control bits) */
	mrs	x1, CORTEX_A53_ECTLR_EL1
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	msr	CORTEX_A53_ECTLR_EL1, x1

	ret
912
/*
 * part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after standby state
 * (no-op on this platform)
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_sys_exit_stdby:
	ret
922
/*
 * part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to
 * suspend-to-power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4
 */
_soc_sys_prep_pwrdn:
	mov	x4, x30			/* preserve LR across the calls below */
	/* make sure the smp bit is set */
	mrs	x1, CORTEX_A53_ECTLR_EL1
	orr	x1, x1, #CPUECTLR_SMPEN_MASK
	msr	CORTEX_A53_ECTLR_EL1, x1
	isb

	/* set WFIL2_EN in SCFG_COREPMCR */
	ldr	x0, =SCFG_COREPMCR_OFFSET
	ldr	x1, =COREPMCR_WFIL2
	bl	write_reg_scfg		/* 0-3 */

	/* set OVRD_EN in RCPM2_POWMGTDCR */
	ldr	x0, =RCPM2_POWMGTDCR_OFFSET
	ldr	x1, =POWMGTDCR_OVRD_EN
	bl	write_reg_rcpm2		/* 0-3 */

	mov	x30, x4
	ret
/*
 * part of CPU_SUSPEND
 * this function puts the calling core, and potentially the soc, into a
 * low-power state
 * in:  x0 = core mask lsb
 * out: x0 = 0, success
 *      x0 < 0, failure
 * uses x0 ~ x9, w13 ~ w17 (saved DEVDISR1-5 values), x18 (saved LR)
 */
_soc_sys_pwrdn_wfi:
	mov	x18, x30		/* preserve LR in x18 (stack unusable once
					 * dcache is off in final_pwrdown) */

	/* read IPPDEXPCR0 @ RCPM_IPPDEXPCR0 */
	ldr	x0, =RCPM_IPPDEXPCR0_OFFSET
	bl	read_reg_rcpm
	mov	x7, x0

	/* build an override mask for IPSTPCR4/IPSTPACK4/DEVDISR5 */
	mov	x5, xzr
	ldr	x6, =IPPDEXPCR_MASK2
	and	x6, x6, x7
	cbz	x6, 1f

	/* x5 = override mask
	 * x6 = IPPDEXPCR bits for DEVDISR5
	 * x7 = IPPDEXPCR */

	/* get the overrides: for each IP kept powered in IPPDEXPCR,
	 * set the corresponding DEVDISR5 bit in the override mask */
	orr	x4, x5, #DEVDISR5_I2C_1
	tst	x6, #IPPDEXPCR_I2C1
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR5_LPUART1
	tst	x6, #IPPDEXPCR_LPUART1
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR5_FLX_TMR
	tst	x6, #IPPDEXPCR_FLX_TMR1
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR5_OCRAM1
	tst	x6, #IPPDEXPCR_OCRAM1
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR5_GPIO
	tst	x6, #IPPDEXPCR_GPIO1
	csel	x5, x5, x4, EQ
1:
	/* store the DEVDISR5 override mask */
	ldr	x2, =BC_PSCI_BASE
	add	x2, x2, #AUX_01_DATA
	str	w5, [x2, #DEVDISR5_MASK_OFFSET]

	/* build an override mask for IPSTPCR1/IPSTPACK1/DEVDISR2 */
	mov	x5, xzr
	ldr	x6, =IPPDEXPCR_MASK1
	and	x6, x6, x7
	cbz	x6, 2f

	/* x5 = override mask */
	/* x6 = IPPDEXPCR bits for DEVDISR2 */

	/* get the overrides */
	orr	x4, x5, #DEVDISR2_FMAN1_MAC1
	tst	x6, #IPPDEXPCR_MAC1_1
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1_MAC2
	tst	x6, #IPPDEXPCR_MAC1_2
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1_MAC3
	tst	x6, #IPPDEXPCR_MAC1_3
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1_MAC4
	tst	x6, #IPPDEXPCR_MAC1_4
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1_MAC5
	tst	x6, #IPPDEXPCR_MAC1_5
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1_MAC6
	tst	x6, #IPPDEXPCR_MAC1_6
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1_MAC9
	tst	x6, #IPPDEXPCR_MAC1_9
	csel	x5, x5, x4, EQ

	orr	x4, x5, #DEVDISR2_FMAN1
	tst	x6, #IPPDEXPCR_FM1
	csel	x5, x5, x4, EQ

2:
	/* store the DEVDISR2 override mask */
	ldr	x2, =BC_PSCI_BASE
	add	x2, x2, #AUX_01_DATA
	str	w5, [x2, #DEVDISR2_MASK_OFFSET]

	/* x5 = DEVDISR2 override mask */

	/* write IPSTPCR0 - no overrides */
	ldr	x0, =RCPM2_IPSTPCR0_OFFSET
	ldr	x1, =IPSTPCR0_VALUE
	bl	write_reg_rcpm2

	/* x5 = DEVDISR2 override mask */

	/* write IPSTPCR1 - overrides possible */
	ldr	x0, =RCPM2_IPSTPCR1_OFFSET
	ldr	x1, =IPSTPCR1_VALUE
	bic	x1, x1, x5
	bl	write_reg_rcpm2

	/* write IPSTPCR2 - no overrides */
	ldr	x0, =RCPM2_IPSTPCR2_OFFSET
	ldr	x1, =IPSTPCR2_VALUE
	bl	write_reg_rcpm2

	/* write IPSTPCR3 - no overrides */
	ldr	x0, =RCPM2_IPSTPCR3_OFFSET
	ldr	x1, =IPSTPCR3_VALUE
	bl	write_reg_rcpm2

	/* write IPSTPCR4 - overrides possible */
	ldr	x2, =BC_PSCI_BASE
	add	x2, x2, #AUX_01_DATA
	ldr	w6, [x2, #DEVDISR5_MASK_OFFSET]
	ldr	x0, =RCPM2_IPSTPCR4_OFFSET
	ldr	x1, =IPSTPCR4_VALUE
	bic	x1, x1, x6
	bl	write_reg_rcpm2

	/* x5 = DEVDISR2 override mask */
	/* x6 = DEVDISR5 override mask */

	/* poll on IPSTPACK0 until the stop request is acknowledged,
	 * or the retry count is exhausted */
	ldr	x3, =RCPM2_IPSTPACKR0_OFFSET
	ldr	x4, =IPSTPCR0_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
3:
	mov	x0, x3
	bl	read_reg_rcpm2
	cmp	x0, x4
	b.eq	14f
	sub	x7, x7, #1
	cbnz	x7, 3b

14:
	/* poll on IPSTPACK1 */
	ldr	x3, =IPSTPCR1_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
	bic	x4, x3, x5		/* expected ack = value minus overrides */
	ldr	x3, =RCPM2_IPSTPACKR1_OFFSET
4:
	mov	x0, x3
	bl	read_reg_rcpm2
	cmp	x0, x4
	b.eq	15f
	sub	x7, x7, #1
	cbnz	x7, 4b

15:
	/* poll on IPSTPACK2 */
	ldr	x3, =RCPM2_IPSTPACKR2_OFFSET
	ldr	x4, =IPSTPCR2_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
5:
	mov	x0, x3
	bl	read_reg_rcpm2
	cmp	x0, x4
	b.eq	16f
	sub	x7, x7, #1
	cbnz	x7, 5b

16:
	/* poll on IPSTPACK3 */
	ldr	x3, =RCPM2_IPSTPACKR3_OFFSET
	ldr	x4, =IPSTPCR3_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
6:
	mov	x0, x3
	bl	read_reg_rcpm2
	cmp	x0, x4
	b.eq	17f
	sub	x7, x7, #1
	cbnz	x7, 6b

17:
	/* poll on IPSTPACK4 */
	ldr	x3, =IPSTPCR4_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
	bic	x4, x3, x6		/* expected ack = value minus overrides */
	ldr	x3, =RCPM2_IPSTPACKR4_OFFSET
7:
	mov	x0, x3
	bl	read_reg_rcpm2
	cmp	x0, x4
	b.eq	18f
	sub	x7, x7, #1
	cbnz	x7, 7b

18:
	ldr	x7, =BC_PSCI_BASE
	add	x7, x7, #AUX_01_DATA

	/* x5 = DEVDISR2 override mask
	 * x6 = DEVDISR5 override mask
	 * x7 = [soc_data_area] */

	/* save DEVDISR1 and load new value (saved values are restored
	 * after the SoC wakes; they must live in registers because the
	 * caches are about to be the only usable memory) */
	mov	x0, #DCFG_DEVDISR1_OFFSET
	bl	read_reg_dcfg
	mov	w13, w0
	mov	x0, #DCFG_DEVDISR1_OFFSET
	ldr	x1, =DEVDISR1_VALUE
	bl	write_reg_dcfg

	/* save DEVDISR2 and load new value */
	mov	x0, #DCFG_DEVDISR2_OFFSET
	bl	read_reg_dcfg
	mov	w14, w0
	mov	x0, #DCFG_DEVDISR2_OFFSET
	ldr	x1, =DEVDISR2_VALUE
	bic	x1, x1, x5
	bl	write_reg_dcfg

	/* x6 = DEVDISR5 override mask */
	/* x7 = [soc_data_area] */

	/* save DEVDISR3 and load new value */
	mov	x0, #DCFG_DEVDISR3_OFFSET
	bl	read_reg_dcfg
	mov	w15, w0
	mov	x0, #DCFG_DEVDISR3_OFFSET
	ldr	x1, =DEVDISR3_VALUE
	bl	write_reg_dcfg

	/* save DEVDISR4 and load new value */
	mov	x0, #DCFG_DEVDISR4_OFFSET
	bl	read_reg_dcfg
	mov	w16, w0
	mov	x0, #DCFG_DEVDISR4_OFFSET
	ldr	x1, =DEVDISR4_VALUE
	bl	write_reg_dcfg

	/* save DEVDISR5 and load new value */
	mov	x0, #DCFG_DEVDISR5_OFFSET
	bl	read_reg_dcfg
	mov	w17, w0
	mov	x0, #DCFG_DEVDISR5_OFFSET
	ldr	x1, =DEVDISR5_VALUE
	bic	x1, x1, x6
	bl	write_reg_dcfg

	/* x7 = [soc_data_area] */

	/* save cpuactlr and disable data prefetch */
	mrs	x0, CPUACTLR_EL1
	str	w0, [x7, #CPUACTLR_DATA_OFFSET]
	bic	x0, x0, #CPUACTLR_L1PCTL_MASK
	msr	CPUACTLR_EL1, x0

	/* x6 = DEVDISR5 override mask */

	/* setup registers for cache-only execution */
	ldr	x5, =IPSTPCR4_VALUE
	bic	x5, x5, x6
	mov	x6, #DDR_CNTRL_BASE_ADDR
	mov	x7, #DCSR_RCPM2_BASE
	mov	x8, #NXP_DCFG_ADDR
	dsb	sy
	isb

	/* set the DLL_LOCK cycle count (DDR regs are big-endian) */
	ldr	w1, [x6, #DDR_TIMING_CFG_4_OFFSET]
	rev	w2, w1
	bic	w2, w2, #DLL_LOCK_MASK
	orr	w2, w2, #DLL_LOCK_VALUE
	rev	w1, w2
	str	w1, [x6, #DDR_TIMING_CFG_4_OFFSET]

	/*
	 * x5 = ipstpcr4 (IPSTPCR4_VALUE bic DEVDISR5_MASK)
	 * x6 = DDR_CNTRL_BASE_ADDR
	 * x7 = DCSR_RCPM2_BASE
	 * x8 = NXP_DCFG_ADDR
	 * w13 = DEVDISR1 saved value
	 * w14 = DEVDISR2 saved value
	 * w15 = DEVDISR3 saved value
	 * w16 = DEVDISR4 saved value
	 * w17 = DEVDISR5 saved value
	 */

	/* enter the cache-only sequence */
	mov	x9, #CORE_RESTARTABLE
	bl	final_pwrdown

	/* when we are here, the core has come out of wfi and the SoC is back up */

	mov	x30, x18
	ret
1255
/*
 * part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after power-down
 * (undoes the programming done in _soc_sys_prep_pwrdn)
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_sys_exit_pwrdn:
	/* clear POWMGTDCR */
	mov	x1, #DCSR_RCPM2_BASE
	str	wzr, [x1, #RCPM2_POWMGTDCR_OFFSET]

	/* clear WFIL2_EN in SCFG_COREPMCR */
	mov	x1, #NXP_SCFG_ADDR
	str	wzr, [x1, #SCFG_COREPMCR_OFFSET]

	ret
1273
/*
 * write a register in the SCFG block (big-endian register space)
 * in: x0 = offset
 * in: w1 = value to write
 * uses x0, x1, x2, x3
 */
write_reg_scfg:
	ldr	x2, =NXP_SCFG_ADDR
	/* swap for BE */
	rev	w3, w1
	str	w3, [x2, x0]
	ret
/*
 * read a register in the SCFG block (big-endian register space)
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1, x2
 */
read_reg_scfg:
	ldr	x2, =NXP_SCFG_ADDR
	ldr	w1, [x2, x0]
	/* swap for BE */
	rev	w0, w1
	ret
1298
/*
 * write a register in the DCFG block (big-endian register space)
 * in: x0 = offset
 * in: w1 = value to write
 * uses x0, x1, x2, x3
 */
write_reg_dcfg:
	ldr	x2, =NXP_DCFG_ADDR
	/* swap for BE */
	rev	w3, w1
	str	w3, [x2, x0]
	ret
1311
/*
 * read a register in the DCFG block (big-endian register space)
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1, x2
 */
read_reg_dcfg:
	ldr	x2, =NXP_DCFG_ADDR
	ldr	w1, [x2, x0]
	/* swap for BE */
	rev	w0, w1
	ret
1324
/*
 * write a register in the RCPM block (big-endian register space)
 * in: x0 = offset
 * in: w1 = value to write
 * uses x0, x1, x2, x3
 */
write_reg_rcpm:
	ldr	x2, =NXP_RCPM_ADDR
	/* swap for BE */
	rev	w3, w1
	str	w3, [x2, x0]
	ret
1337
/*
 * read a register in the RCPM block (big-endian register space)
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1, x2
 */
read_reg_rcpm:
	ldr	x2, =NXP_RCPM_ADDR
	ldr	w1, [x2, x0]
	/* swap for BE */
	rev	w0, w1
	ret
1350
/*
 * write a register in the DCSR-RCPM2 block (big-endian register space)
 * in: x0 = offset
 * in: w1 = value to write
 * uses x0, x1, x2, x3
 */
write_reg_rcpm2:
	ldr	x2, =DCSR_RCPM2_BASE
	/* swap for BE */
	rev	w3, w1
	str	w3, [x2, x0]
	ret
1363
/*
 * read a register in the DCSR-RCPM2 block (big-endian register space)
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1, x2
 */
read_reg_rcpm2:
	ldr	x2, =DCSR_RCPM2_BASE
	ldr	w1, [x2, x0]
	/* swap for BE */
	rev	w0, w1
	ret
1376
/*
 * this function returns the base address of the gic distributor
 * (the address depends on SoC revision and the SCFG 4K/64K alignment setting)
 * in:  none
 * out: x0 = base address of gic distributor
 * uses x0, x1
 */
_getGICD_BaseAddr:
	/* read SVR and get the SoC version */
	mov	x0, #NXP_DCFG_ADDR
	ldr	w1, [x0, #DCFG_SVR_OFFSET]
	rev	w0, w1			/* swap for BE */

	/* x0 = svr */
	and	w0, w0, #SVR_MIN_VER_MASK
	cmp	w0, #SVR_MINOR_VER_0
	b.ne	8f

	/* load the gic base addresses for rev 1.0 parts */
	ldr	x0, =NXP_GICD_4K_ADDR
	b	10f
8:
	/* for rev 1.1 and later parts, the GIC base addresses */
	/* can be at 4k or 64k offsets */

	/* read the scfg reg GIC400_ADDR_ALIGN */
	mov	x0, #NXP_SCFG_ADDR
	ldr	w1, [x0, #SCFG_GIC400_ADDR_ALIGN_OFFSET]
	rev	w0, w1			/* swap for BE */

	/* x0 = GIC400_ADDR_ALIGN value */
	and	x0, x0, #SCFG_GIC400_ADDR_ALIGN_4KMODE_MASK
	mov	x1, #SCFG_GIC400_ADDR_ALIGN_4KMODE_EN
	cmp	x0, x1
	b.ne	9f

	/* load the base addresses for 4k offsets */
	ldr	x0, =NXP_GICD_4K_ADDR
	b	10f
9:
	/* load the base address for 64k offsets */
	ldr	x0, =NXP_GICD_64K_ADDR
10:
	ret
1420
1421/*
1422 * this function returns the base address of the gic distributor
1423 * in: none
1424 * out: x0 = base address of gic controller
1425 * uses x0, x1
1426 */
1427_getGICC_BaseAddr:
1428 /* read SVR and get the SoC version */
1429 mov x0, #NXP_DCFG_ADDR
1430 ldr w1, [x0, #DCFG_SVR_OFFSET]
1431 rev w0, w1
1432
1433 /* x0 = svr */
1434 and w0, w0, #SVR_MIN_VER_MASK
1435 cmp w0, #SVR_MINOR_VER_0
1436 b.ne 8f
1437
1438 /* load the gic base addresses for rev 1.0 parts */
1439 ldr x0, =NXP_GICC_4K_ADDR
1440 b 10f
14418:
1442 /* for rev 1.1 and later parts, the GIC base addresses */
1443 /* can be at 4k or 64k offsets */
1444
1445 /* read the scfg reg GIC400_ADDR_ALIGN */
1446 mov x0, #NXP_SCFG_ADDR
1447 ldr w1, [x0, #SCFG_GIC400_ADDR_ALIGN_OFFSET]
1448 rev w0, w1
1449
1450 /* x0 = GIC400_ADDR_ALIGN value */
1451 and x0, x0, #SCFG_GIC400_ADDR_ALIGN_4KMODE_MASK
1452 mov x1, #SCFG_GIC400_ADDR_ALIGN_4KMODE_EN
1453 cmp x0, x1
1454 b.ne 9f
1455
1456 /* load the base addresses for 4k offsets */
1457 ldr x0, =NXP_GICC_4K_ADDR
1458 b 10f
14599:
1460 /* load the base address for 64k offsets */
1461 ldr x0, =NXP_GICC_64K_ADDR
146210:
1463 ret
1464
1465/*
1466 * this function will pwrdown ddr and the final core - it will do this
1467 * by loading itself into the icache and then executing from there
1468 * in: x5 = ipstpcr4 (IPSTPCR4_VALUE bic DEVDISR5_MASK)
1469 * x6 = DDR_CNTRL_BASE_ADDR
1470 * x7 = DCSR_RCPM2_BASE
1471 * x8 = NXP_DCFG_ADDR
1472 * x9 = 0, restartable
1473 * = 1, non-restartable
1474 * w13 = DEVDISR1 saved value
1475 * w14 = DEVDISR2 saved value
1476 * w15 = DEVDISR3 saved value
1477 * w16 = DEVDISR4 saved value
1478 * w17 = DEVDISR5 saved value
1479 * out: none
1480 * uses x0 ~ x9
1481 */
1482
1483/* 4Kb aligned */
1484.align 12
1485final_pwrdown:
1486 mov x0, xzr
1487 b touch_line_0
1488start_line_0:
1489 mov x0, #1
1490 mov x2, #DDR_SDRAM_CFG_2_FRCSR /* put ddr in self refresh - start */
1491 ldr w3, [x6, #DDR_SDRAM_CFG_2_OFFSET]
1492 rev w4, w3
1493 orr w4, w4, w2
1494 rev w3, w4
1495 str w3, [x6, #DDR_SDRAM_CFG_2_OFFSET] /* put ddr in self refresh - end */
1496 orr w3, w5, #DEVDISR5_MEM /* quiesce ddr clocks - start */
1497 rev w4, w3
1498 str w4, [x7, #RCPM2_IPSTPCR4_OFFSET] /* quiesce ddr clocks - end */
1499
1500 mov w3, #DEVDISR5_MEM
1501 rev w3, w3 /* polling mask */
1502 mov x2, #DDR_SLEEP_RETRY_CNT /* poll on ipstpack4 - start */
1503touch_line_0:
1504 cbz x0, touch_line_1
1505
1506start_line_1:
1507 ldr w1, [x7, #RCPM2_IPSTPACKR4_OFFSET]
1508 tst w1, w3
1509 b.ne 1f
1510 subs x2, x2, #1
1511 b.gt start_line_1 /* poll on ipstpack4 - end */
1512
1513 /* if we get here, we have a timeout err */
1514 rev w4, w5
1515 str w4, [x7, #RCPM2_IPSTPCR4_OFFSET] /* re-enable ddr clks interface */
1516 mov x0, #ERROR_DDR_SLEEP /* load error code */
1517 b 2f
15181:
1519 str w4, [x8, #DCFG_DEVDISR5_OFFSET] /* disable ddr cntrlr clk in devdisr5 */
15205:
1521 wfi /* stop the final core */
1522
1523 cbnz x9, 5b /* if non-restartable, keep in wfi */
1524 rev w4, w5
1525 str w4, [x8, #DCFG_DEVDISR5_OFFSET] /* re-enable ddr in devdisr5 */
1526 str w4, [x7, #RCPM2_IPSTPCR4_OFFSET] /* re-enable ddr clk in ipstpcr4 */
1527touch_line_1:
1528 cbz x0, touch_line_2
1529
1530start_line_2:
1531 ldr w1, [x7, #RCPM2_IPSTPACKR4_OFFSET] /* poll on ipstpack4 - start */
1532 tst w1, w3
1533 b.eq 2f
1534 nop
1535 b start_line_2 /* poll on ipstpack4 - end */
15362:
1537 mov x2, #DDR_SDRAM_CFG_2_FRCSR /* take ddr out-of self refresh - start */
1538 ldr w3, [x6, #DDR_SDRAM_CFG_2_OFFSET]
1539 rev w4, w3
1540 bic w4, w4, w2
1541 rev w3, w4
1542 mov x1, #DDR_SLEEP_RETRY_CNT /* wait for ddr cntrlr clock - start */
15433:
1544 subs x1, x1, #1
1545 b.gt 3b /* wait for ddr cntrlr clock - end */
1546 str w3, [x6, #DDR_SDRAM_CFG_2_OFFSET] /* take ddr out-of self refresh - end */
1547 rev w1, w17
1548touch_line_2:
1549 cbz x0, touch_line_3
1550
1551start_line_3:
1552 str w1, [x8, #DCFG_DEVDISR5_OFFSET] /* reset devdisr5 */
1553 rev w1, w16
1554 str w1, [x8, #DCFG_DEVDISR4_OFFSET] /* reset devdisr4 */
1555 rev w1, w15
1556 str w1, [x8, #DCFG_DEVDISR3_OFFSET] /* reset devdisr3 */
1557 rev w1, w14
1558 str w1, [x8, #DCFG_DEVDISR2_OFFSET] /* reset devdisr2 */
1559 rev w1, w13
1560 str w1, [x8, #DCFG_DEVDISR1_OFFSET] /* reset devdisr1 */
1561 str wzr, [x7, #RCPM2_IPSTPCR4_OFFSET] /* reset ipstpcr4 */
1562 str wzr, [x7, #RCPM2_IPSTPCR3_OFFSET] /* reset ipstpcr3 */
1563 str wzr, [x7, #RCPM2_IPSTPCR2_OFFSET] /* reset ipstpcr2 */
1564 str wzr, [x7, #RCPM2_IPSTPCR1_OFFSET] /* reset ipstpcr1 */
1565 str wzr, [x7, #RCPM2_IPSTPCR0_OFFSET] /* reset ipstpcr0 */
1566 b continue_restart
1567touch_line_3:
1568 cbz x0, start_line_0
1569
1570/* execute here after ddr is back up */
1571continue_restart:
1572 /*
1573 * if x0 = 1, all is well
1574 * if x0 < 1, we had an error
1575 */
1576 cmp x0, #1
1577 b.ne 4f
1578 mov x0, #0
15794:
1580 ret
1581
1582/*
1583 * Note: there is no return from this function
1584 * this function will shutdown ddr and the final core - it will do this
1585 * by loading itself into the icache and then executing from there
1586 * in: x5 = ipstpcr4 (IPSTPCR4_VALUE bic DEVDISR5_MASK)
1587 * x6 = DDR_CNTRL_BASE_ADDR
1588 * x7 = DCSR_RCPM2_BASE
1589 * x8 = NXP_DCFG_ADDR
1590 * out: none
1591 * uses x0 ~ x8
1592 */
1593
1594/* 4Kb aligned */
1595.align 12
1596final_shutdown:
1597
1598 mov x0, xzr
1599 b touch_line0
1600start_line0:
1601 mov x0, #1
1602 mov x2, #DDR_SDRAM_CFG_2_FRCSR /* put ddr in self refresh - start */
1603 ldr w3, [x6, #DDR_SDRAM_CFG_2_OFFSET]
1604 rev w4, w3
1605 orr w4, w4, w2
1606 rev w3, w4
1607 str w3, [x6, #DDR_SDRAM_CFG_2_OFFSET] /* put ddr in self refresh - end */
1608 orr w3, w5, #DEVDISR5_MEM /* quiesce ddr clocks - start */
1609 rev w4, w3
1610 str w4, [x7, #RCPM2_IPSTPCR4_OFFSET] /* quiesce ddr clocks - end */
1611
1612 mov w3, #DEVDISR5_MEM
1613 rev w3, w3 /* polling mask */
1614 mov x2, #DDR_SLEEP_RETRY_CNT /* poll on ipstpack4 - start */
1615touch_line0:
1616 cbz x0, touch_line1
1617
1618start_line1:
1619 ldr w1, [x7, #RCPM2_IPSTPACKR4_OFFSET]
1620 tst w1, w3
1621 b.ne 1f
1622 subs x2, x2, #1
1623 b.gt start_line1 /* poll on ipstpack4 - end */
1624 nop
1625 nop
1626 nop
1627 nop
16281:
1629 str w4, [x8, #DCFG_DEVDISR5_OFFSET] /* disable ddr cntrlr clk in devdisr5 */
16305:
1631 wfi /* stop the final core */
1632 b 5b /* stay here until POR */
1633 nop
1634 nop
1635 nop
1636touch_line1:
1637 cbz x0, start_line0