Fully initialise essential control registers

This patch updates the el3_arch_init_common macro so that it fully
initialises essential control registers rather than relying on hardware
to set the reset values.

The context management functions are also updated to fully initialise
the appropriate control registers when initialising the non-secure and
secure context structures and when preparing to leave EL3 for a lower
EL.

This gives better alignment with the ARM ARM which states that software
must initialise RES0 and RES1 fields with 0 / 1.

This patch also corrects the following typos:

"NASCR definitions" -> "NSACR definitions"

Change-Id: Ia8940b8351dc27bc09e2138b011e249655041cfc
Signed-off-by: David Cunado <david.cunado@arm.com>
diff --git a/include/common/aarch32/el3_common_macros.S b/include/common/aarch32/el3_common_macros.S
index e1261ea..6fc00dd 100644
--- a/include/common/aarch32/el3_common_macros.S
+++ b/include/common/aarch32/el3_common_macros.S
@@ -16,10 +16,18 @@
 	 */
 	.macro el3_arch_init_common _exception_vectors
 	/* ---------------------------------------------------------------------
-	 * Enable the instruction cache and alignment checks
+	 * SCTLR has already been initialised - read current value before
+	 * modifying.
+	 *
+	 * SCTLR.I: Enable the instruction cache.
+	 *
+	 * SCTLR.A: Enable Alignment fault checking. All instructions that load
+	 *  or store one or more registers have an alignment check that the
+	 *  address being accessed is aligned to the size of the data element(s)
+	 *  being accessed.
 	 * ---------------------------------------------------------------------
 	 */
-	ldr	r1, =(SCTLR_RES1 | SCTLR_I_BIT | SCTLR_A_BIT)
+	ldr	r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
 	ldcopr	r0, SCTLR
 	orr	r0, r0, r1
 	stcopr	r0, SCTLR
@@ -34,13 +42,14 @@
 	stcopr	r0, MVBAR
 	isb
 
-	/* -----------------------------------------------------
-	 * Enable the SIF bit to disable instruction fetches
-	 * from Non-secure memory.
-	 * -----------------------------------------------------
+	/* ---------------------------------------------------------------------
+	 * Initialise SCR, setting all fields rather than relying on the hw.
+	 *
+	 * SCR.SIF: Enabled so that Secure state instruction fetches from
+	 *  Non-secure memory are not permitted.
+	 * ---------------------------------------------------------------------
 	 */
-	ldcopr	r0, SCR
-	orr	r0, r0, #SCR_SIF_BIT
+	ldr	r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
 	stcopr	r0, SCR
 
 	/* -----------------------------------------------------
@@ -51,32 +60,61 @@
 	cpsie   a
 	isb
 
-	/* Enable access to Advanced SIMD registers */
+	/* ---------------------------------------------------------------------
+	 * Initialise NSACR, setting all the fields, except for the
+	 * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
+	 * fields are architecturally UNKNOWN on reset.
+	 *
+	 * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
+	 *  cp11 field is ignored, but is set to same value as cp10. The cp10
+	 *  field is set to allow access to Advanced SIMD and floating point
+	 *  features from both Security states.
+	 * ---------------------------------------------------------------------
+	 */
 	ldcopr	r0, NSACR
-	bic	r0, r0, #NSASEDIS_BIT
-	bic	r0, r0, #NSTRCDIS_BIT
-	orr	r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
+	and	r0, r0, #NSACR_IMP_DEF_MASK
+	orr	r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
 	stcopr	r0, NSACR
 	isb
 
-	/*
-	 * Enable access to Advanced SIMD, Floating point and to the Trace
-	 * functionality as well.
+	/* ---------------------------------------------------------------------
+	 * Initialise CPACR, setting all fields rather than relying on hw. Some
+	 * fields are architecturally UNKNOWN on reset.
+	 *
+	 * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
+	 *  to trace registers. Set to zero to allow access.
+	 *
+	 * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
+	 *  cp11 field is ignored, but is set to same value as cp10. The cp10
+	 *  field is set to allow full access from PL0 and PL1 to floating-point
+	 *  and Advanced SIMD features.
+	 * ---------------------------------------------------------------------
 	 */
-	ldcopr	r0, CPACR
-	bic	r0, r0, #ASEDIS_BIT
-	bic	r0, r0, #TRCDIS_BIT
-	orr	r0, r0, #CPACR_ENABLE_FP_ACCESS
+	ldr	r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
 	stcopr	r0, CPACR
 	isb
 
-	vmrs	r0, FPEXC
-	orr	r0, r0, #FPEXC_EN_BIT
+	/* ---------------------------------------------------------------------
+	 * Initialise FPEXC, setting all fields rather than relying on hw. Some
+	 * fields are architecturally UNKNOWN on reset and are set to zero
+	 * except for field(s) listed below.
+	 *
+	 * FPEXC.EN: Enable access to Advanced SIMD and floating point features
+	 *  from all exception levels.
+	 * ---------------------------------------------------------------------
+	 */
+	ldr	r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
 	vmsr	FPEXC, r0
 	isb
 
-	/* Disable secure self-hosted invasive debug. */
-	ldr	r0, =SDCR_DEF_VAL
+	/* ---------------------------------------------------------------------
+	 * Initialise SDCR, setting all the fields rather than relying on hw.
+	 *
+	 * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
+	 * Secure EL1 are disabled.
+	 * ---------------------------------------------------------------------
+	 */
+	ldr	r0, =(SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE))
 	stcopr	r0, SDCR
 
 	.endm
@@ -91,8 +129,9 @@
  * why this macro is parameterised ; each parameter allows to enable/disable
  * some actions.
  *
- *  _set_endian:
- *	Whether the macro needs to configure the endianness of data accesses.
+ *  _init_sctlr:
+ *	Whether the macro needs to initialise the SCTLR register including
+ *	configuring the endianness of data accesses.
  *
  *  _warm_boot_mailbox:
  *	Whether the macro needs to detect the type of boot (cold/warm). The
@@ -120,7 +159,7 @@
  * -----------------------------------------------------------------------------
  */
 	.macro el3_entrypoint_common					\
-		_set_endian, _warm_boot_mailbox, _secondary_cold_boot,	\
+		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
 		_init_memory, _init_c_runtime, _exception_vectors
 
 	/* Make sure we are in Secure Mode */
@@ -130,17 +169,27 @@
 	ASM_ASSERT(eq)
 #endif
 
-	.if \_set_endian
+	.if \_init_sctlr
 		/* -------------------------------------------------------------
-		 * Set the CPU endianness before doing anything that might
-		 * involve memory reads or writes.
+		 * This is the initialisation of SCTLR and so must ensure that
+		 * all fields are explicitly set rather than relying on hw. Some
+		 * fields reset to an IMPLEMENTATION DEFINED value.
+		 *
+		 * SCTLR.TE: Set to zero so that exceptions to an Exception
+		 *  Level executing at PL1 are taken to A32 state.
+		 *
+		 * SCTLR.EE: Set the CPU endianness before doing anything that
+		 *  might involve memory reads or writes. Set to zero to select
+		 *  Little Endian.
+		 *
+		 * SCTLR.V: Set to zero to select the normal exception vectors
+		 *  with base address held in VBAR.
 		 * -------------------------------------------------------------
 		 */
-		ldcopr	r0, SCTLR
-		bic	r0, r0, #SCTLR_EE_BIT
+		ldr     r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | SCTLR_V_BIT))
 		stcopr	r0, SCTLR
 		isb
-	.endif /* _set_endian */
+	.endif /* _init_sctlr */
 
 	/* Switch to monitor mode */
 	cps	#MODE32_mon
diff --git a/include/common/aarch64/el3_common_macros.S b/include/common/aarch64/el3_common_macros.S
index 674d52f..ed35df8 100644
--- a/include/common/aarch64/el3_common_macros.S
+++ b/include/common/aarch64/el3_common_macros.S
@@ -15,8 +15,20 @@
 	 */
 	.macro el3_arch_init_common _exception_vectors
 	/* ---------------------------------------------------------------------
-	 * Enable the instruction cache, stack pointer and data access alignment
-	 * checks
+	 * SCTLR_EL3 has already been initialised - read current value before
+	 * modifying.
+	 *
+	 * SCTLR_EL3.I: Enable the instruction cache.
+	 *
+	 * SCTLR_EL3.SA: Enable Stack Alignment check. A SP alignment fault
+	 *  exception is generated if a load or store instruction executed at
+	 *  EL3 uses the SP as the base address and the SP is not aligned to a
+	 *  16-byte boundary.
+	 *
+	 * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
+	 *  load or store one or more registers have an alignment check that the
+	 *  address being accessed is aligned to the size of the data element(s)
+	 *  being accessed.
 	 * ---------------------------------------------------------------------
 	 */
 	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
@@ -46,19 +58,56 @@
 	isb
 
 	/* ---------------------------------------------------------------------
-	 * Early set RES1 bits in SCR_EL3. Set EA bit to catch both
-	 * External Aborts and SError Interrupts in EL3 and also the SIF bit
-	 * to disable instruction fetches from Non-secure memory.
+	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
+	 * All fields are architecturally UNKNOWN on reset. The following fields
+	 * do not change during the TF lifetime. The remaining fields are set to
+	 * zero here but are updated ahead of transitioning to a lower EL in the
+	 * function cm_init_context_common().
+	 *
+	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
+	 *  EL2, EL1 and EL0 are not trapped to EL3.
+	 *
+	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
+	 *  EL2, EL1 and EL0 are not trapped to EL3.
+	 *
+	 * SCR_EL3.SIF: Set to one to disable instruction fetches from
+	 *  Non-secure memory.
+	 *
+	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
+	 *  both Security states and both Execution states.
+	 *
+	 * SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
+	 *  to EL3 when executing at any EL.
 	 * ---------------------------------------------------------------------
 	 */
-	mov	x0, #(SCR_RES1_BITS | SCR_EA_BIT | SCR_SIF_BIT)
+	mov	x0, #((SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT) \
+			& ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT))
 	msr	scr_el3, x0
 
 	/* ---------------------------------------------------------------------
-	 * Disable secure self-hosted invasive debug.
+	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
+	 * Some fields are architecturally UNKNOWN on reset.
+	 *
+	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
+	 *  Debug exceptions, other than Breakpoint Instruction exceptions, are
+	 *  disabled from all ELs in Secure state.
+	 *
+	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
+	 *  privileged debug from S-EL1.
+	 *
+	 * MDCR_EL3.TDOSA: Set to zero so that EL1 and EL2 System register
+	 *  access to the powerdown debug registers do not trap to EL3.
+	 *
+	 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
+	 *  debug registers, other than those registers that are controlled by
+	 *  MDCR_EL3.TDOSA.
+	 *
+	 * MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
+	 *  accesses to all Performance Monitors registers do not trap to EL3.
 	 * ---------------------------------------------------------------------
 	 */
-	mov_imm	x0, MDCR_DEF_VAL
+	mov_imm	x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE)) \
+			& ~(MDCR_TDOSA_BIT | MDCR_TDA_BIT | MDCR_TPM_BIT))
 	msr	mdcr_el3, x0
 
 	/* ---------------------------------------------------------------------
@@ -69,28 +118,20 @@
 	msr	daifclr, #DAIF_ABT_BIT
 
 	/* ---------------------------------------------------------------------
-	 * The initial state of the Architectural feature trap register
-	 * (CPTR_EL3) is unknown and it must be set to a known state. All
-	 * feature traps are disabled. Some bits in this register are marked as
-	 * reserved and should not be modified.
+	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
+	 * All fields are architecturally UNKNOWN on reset.
 	 *
-	 * CPTR_EL3.TCPAC: This causes a direct access to the CPACR_EL1 from EL1
-	 *  or the CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
+	 * CPTR_EL3.TCPAC: Set to zero so that any accesses to CPACR_EL1,
+	 *  CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
 	 *
-	 * CPTR_EL3.TTA: This causes access to the Trace functionality to trap
-	 *  to EL3 when executed from EL0, EL1, EL2, or EL3. If system register
-	 *  access to trace functionality is not supported, this bit is RES0.
+	 * CPTR_EL3.TTA: Set to zero so that System register accesses to the
+	 *  trace registers do not trap to EL3.
 	 *
-	 * CPTR_EL3.TFP: This causes instructions that access the registers
-	 *  associated with Floating Point and Advanced SIMD execution to trap
-	 *  to EL3 when executed from any exception level, unless trapped to EL1
-	 *  or EL2.
+	 * CPTR_EL3.TFP: Set to zero so that accesses to Advanced SIMD and
+	 *  floating-point functionality do not trap to EL3.
 	 * ---------------------------------------------------------------------
 	 */
-	mrs	x0, cptr_el3
-	bic	w0, w0, #TCPAC_BIT
-	bic	w0, w0, #TTA_BIT
-	bic	w0, w0, #TFP_BIT
+	mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
 	msr	cptr_el3, x0
 	.endm
 
@@ -104,8 +145,9 @@
  * why this macro is parameterised ; each parameter allows to enable/disable
  * some actions.
  *
- *  _set_endian:
- *	Whether the macro needs to configure the endianness of data accesses.
+ *  _init_sctlr:
+ *	Whether the macro needs to initialise SCTLR_EL3, including configuring
+ *	the endianness of data accesses.
  *
  *  _warm_boot_mailbox:
  *	Whether the macro needs to detect the type of boot (cold/warm). The
@@ -133,20 +175,35 @@
  * -----------------------------------------------------------------------------
  */
 	.macro el3_entrypoint_common					\
-		_set_endian, _warm_boot_mailbox, _secondary_cold_boot,	\
+		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
 		_init_memory, _init_c_runtime, _exception_vectors
 
-	.if \_set_endian
+	.if \_init_sctlr
 		/* -------------------------------------------------------------
-		 * Set the CPU endianness before doing anything that might
-		 * involve memory reads or writes.
+		 * This is the initialisation of SCTLR_EL3 and so must ensure
+		 * that all fields are explicitly set rather than relying on hw.
+		 * Some fields reset to an IMPLEMENTATION DEFINED value and
+		 * others are architecturally UNKNOWN on reset.
+		 *
+		 * SCTLR.EE: Set the CPU endianness before doing anything that
+		 *  might involve memory reads or writes. Set to zero to select
+		 *  Little Endian.
+		 *
+		 * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
+		 *  force all memory regions that are writeable to be treated as
+		 *  XN (Execute-never). Set to zero so that this control has no
+		 *  effect on memory access permissions.
+		 *
+		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
+		 *
+		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
 		 * -------------------------------------------------------------
 		 */
-		mrs	x0, sctlr_el3
-		bic	x0, x0, #SCTLR_EE_BIT
+		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
+				| SCTLR_SA_BIT | SCTLR_A_BIT))
 		msr	sctlr_el3, x0
 		isb
-	.endif /* _set_endian */
+	.endif /* _init_sctlr */
 
 	.if \_warm_boot_mailbox
 		/* -------------------------------------------------------------
diff --git a/include/common/ep_info.h b/include/common/ep_info.h
index 23d27c4..3f6213f 100644
--- a/include/common/ep_info.h
+++ b/include/common/ep_info.h
@@ -33,6 +33,7 @@
 			((x) = ((x) & ~PARAM_EP_SECURITY_MASK) | (security))
 
 #define EP_EE_MASK	U(0x2)
+#define EP_EE_SHIFT	1
 #define EP_EE_LITTLE	U(0x0)
 #define EP_EE_BIG	U(0x2)
 #define EP_GET_EE(x) (x & EP_EE_MASK)