fix(intel): update warm reset routine and bootscratch register usage
Agilex5 platform:
The boot scratch COLD6 register is meant for customer use only, so
use the Intel-specific COLD3 register, bits [5:2], to determine the
warm reset and SMP boot requests.
Also handle unaligned Device/IO memory stores and loads in the
assembly entrypoint startup code.
Agilex, Stratix10, N5X platforms:
Use only the four LSBs [3:0] of the boot scratch COLD6 register to
detect the warm reset request.
Change-Id: I4fd6e63fe0bd42ddcb4a3f81c7a7295bdc8ca65f
Signed-off-by: Girisha Dengi <girisha.dengi@intel.com>
Signed-off-by: Jit Loon Lim <jit.loon.lim@altera.com>
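For reviewers, a minimal host-runnable C sketch of the magic-key dispatch
described above: the boot scratch value is masked down to the magic-key field
and compared against the warm reset and SMP boot markers. The mask and magic
values here are illustrative placeholders chosen only to match the bit ranges
named in this message; the real BS_REG_MAGIC_KEYS_MASK, L2_RESET_DONE_STATUS
and SMP_SEC_CORE_BOOT_REQ definitions come from the platform headers.

/*
 * Sketch only: these masks and magic values are placeholders that follow
 * the bit ranges described above, not the platform's real definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define BS_REG_MAGIC_KEYS_MASK	0x3cU	/* bits [5:2] of boot scratch COLD3 */
#define L2_RESET_DONE_STATUS	0x04U	/* placeholder warm reset magic */
#define SMP_SEC_CORE_BOOT_REQ	0x08U	/* placeholder SMP boot magic */

static const char *classify_boot(uint32_t scratch)
{
	uint32_t key = scratch & BS_REG_MAGIC_KEYS_MASK;

	if (key == L2_RESET_DONE_STATUS)
		return "warm reset request";
	if (key == SMP_SEC_CORE_BOOT_REQ)
		return "SMP secondary core boot request";
	return "cold reset";
}

int main(void)
{
	/* Bits outside the magic-key field no longer affect the decision. */
	printf("%s\n", classify_boot(0xf0000004U));	/* warm reset request */
	printf("%s\n", classify_boot(0x00000008U));	/* SMP secondary core boot request */
	printf("%s\n", classify_boot(0x00000000U));	/* cold reset */
	return 0;
}

Masking before the compare is what lets the remaining scratch-register bits
carry other state (the patch keeps a backup of the full value in x2) without
being mistaken for a warm reset or SMP boot request.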
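Likewise, the unaligned Device/IO handling amounts to splitting one 32-bit
register access into four byte accesses. The sketch below models that pattern
with an ordinary buffer so it can run on a host (real Device memory obviously
cannot be exercised this way); the helper names are made up for illustration.

/*
 * Host-runnable model of the byte-by-byte access path; it uses a plain
 * buffer instead of real Device/IO memory, and the helper names are
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t read32_bytewise(const volatile uint8_t *p)
{
	/* Combine four byte loads, least significant byte first. */
	return (uint32_t)p[0] |
	       ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) |
	       ((uint32_t)p[3] << 24);
}

static void write32_bytewise(volatile uint8_t *p, uint32_t val)
{
	/* Store one byte at a time, shifting the value down after each store. */
	for (int i = 0; i < 4; i++) {
		p[i] = (uint8_t)val;
		val >>= 8;
	}
}

int main(void)
{
	uint8_t reg[8] = {0};

	/* Deliberately pick an address that is not 64-bit aligned. */
	write32_bytewise(&reg[1], 0x12345678U);
	printf("0x%08x\n", read32_bytewise(&reg[1]));	/* prints 0x12345678 */
	return 0;
}

Byte accesses are always naturally aligned, which is why the patch falls back
to them whenever the register address fails the 64-bit alignment check.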
diff --git a/plat/intel/soc/common/aarch64/plat_helpers.S b/plat/intel/soc/common/aarch64/plat_helpers.S
index b3d5665..74ce279 100644
--- a/plat/intel/soc/common/aarch64/plat_helpers.S
+++ b/plat/intel/soc/common/aarch64/plat_helpers.S
@@ -98,7 +98,36 @@
endfunc plat_my_core_pos
func warm_reset_req
- str xzr, [x4]
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
+ /* Clear the markup before going for warm reset */
+ bic x2, x2, #BS_REG_MAGIC_KEYS_MASK
+ /* Check if the address is 64 bit aligned or not */
+ ldr x4, =L2_RESET_DONE_REG
+ tst x4, #ALIGN_CHECK_64BIT_MASK
+ b.ne unaligned_store
+ /* Device memory address is aligned, store the value directly */
+ str x2, [x4]
+ b continue_warm_reset
+
+	/* Unaligned store: write the value one byte at a time */
+unaligned_store:
+ strb w2, [x4]
+ lsr x2, x2, #8
+ add x4, x4, #1
+ strb w2, [x4]
+ lsr x2, x2, #8
+ add x4, x4, #1
+ strb w2, [x4]
+ lsr x2, x2, #8
+ add x4, x4, #1
+ strb w2, [x4]
+#else
+ /* Clear the markup before going for warm reset */
+ bic x2, x2, #BS_REG_MAGIC_KEYS_MASK
+ str x2, [x4]
+#endif
+
+continue_warm_reset:
bl plat_is_my_cpu_primary
cbz x0, cpu_in_wfi
mov_imm x1, PLAT_SEC_ENTRY
@@ -116,36 +145,71 @@
#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5
func plat_get_my_entrypoint
ldr x4, =L2_RESET_DONE_REG
- ldr x5, [x4]
+
+ /* Check if the address is 64 bit aligned or not */
+ tst x4, #ALIGN_CHECK_64BIT_MASK
+ b.ne unaligned_load
+
+ /* Device memory address is aligned, load the value directly */
+ ldr x1, [x4]
+ b events_check
+
+	/*
+	 * Unaligned device memory access. Read only the lower 32 bits,
+	 * byte by byte, and combine them to get the 32-bit value.
+	 */
+unaligned_load:
+ ldrb w1, [x4]
+ ldrb w2, [x4, #1]
+ ldrb w3, [x4, #2]
+ ldrb w4, [x4, #3]
+ orr x1, x1, x2, lsl #8
+ orr x1, x1, x3, lsl #16
+ orr x1, x1, x4, lsl #24
+
+events_check:
+ /* Keep a backup of the boot scratch register contents */
+ mov x2, x1
+
+ /* Mask and get the required bits */
+ and x1, x1, #BS_REG_MAGIC_KEYS_MASK
/* Check for warm reset request */
- ldr x1, =L2_RESET_DONE_STATUS
+ ldr x5, =L2_RESET_DONE_STATUS
cmp x1, x5
b.eq warm_reset_req
/* Check for SMP secondary cores boot request */
- ldr x1, =SMP_SEC_CORE_BOOT_REQ
+ ldr x5, =SMP_SEC_CORE_BOOT_REQ
cmp x1, x5
b.eq smp_request
- /* Otherwise it is cold reset */
+ /* Otherwise it is a cold reset request */
mov x0, #0
ret
+
smp_request:
/*
- * Return the address 'bl31_warm_entrypoint', which is passed to
- * 'psci_setup' routine as part of BL31 initialization.
+ * On the SMP boot request, return the address 'bl31_warm_entrypoint',
+ * which is passed to 'psci_setup' routine as part of BL31
+ * initialization.
*/
- mov_imm x1, PLAT_SEC_ENTRY
+ ldr x1, =PLAT_SEC_ENTRY
ldr x0, [x1]
- /* Clear the mark up before return */
- str xzr, [x4]
ret
endfunc plat_get_my_entrypoint
#else
func plat_get_my_entrypoint
ldr x4, =L2_RESET_DONE_REG
ldr x5, [x4]
+
+ /* Keep a backup of the boot scratch register contents */
+ mov x2, x5
+
+ /* Mask and get only the required bits */
+ and x5, x5, #BS_REG_MAGIC_KEYS_MASK
+
+ /* Check for warm reset request */
ldr x1, =L2_RESET_DONE_STATUS
cmp x1, x5
b.eq warm_reset_req