refactor(cm): convert el1-ctx assembly offset entries to c structure

Currently, the EL1 part of the context structure (el1_sysregs_t) is
coupled with the feature build flags, so that platforms which do not
enable or support all of the architectural features at once allocate
less context memory.

Similar to the EL2 context optimization (commit "d6af234"), this patch
improves this section further by converting the assembly context-offset
entries into a C structure. It relies on the linker's garbage collection
to remove unreferenced structures from memory, while also aiding
readability and future maintenance. Additionally, it eliminates the use
of #if directives in the 'context_mgmt.c' source file.
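
To illustrate the intent, below is a minimal, hypothetical caller sketch
(the helper name save_el1_timer_ctx is made up, and it assumes sysreg
accessors such as read_cntp_ctl_el0() are available from arch_helpers.h):
with NS_TIMER_SWITCH=0 the write_el1_ctx_arch_timer() macro expands to
nothing, so the call site compiles without any #if guard.

  #include <arch_helpers.h>
  #include <lib/el3_runtime/context_el1.h>

  /* Hypothetical caller: builds unchanged whether NS_TIMER_SWITCH is 0 or 1. */
  static void save_el1_timer_ctx(el1_sysregs_t *el1_ctx)
  {
          write_el1_ctx_arch_timer(el1_ctx, cntp_ctl_el0, read_cntp_ctl_el0());
          write_el1_ctx_arch_timer(el1_ctx, cntkctl_el1, read_cntkctl_el1());
  }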

Change-Id: If6075931cec994bc89231241337eccc7042c5ede
Signed-off-by: Jayanth Dodderi Chidanand <jayanthdodderi.chidanand@arm.com>
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index 005dcaf..7c10506 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -7,6 +7,7 @@
 #ifndef CONTEXT_H
 #define CONTEXT_H
 
+#include <lib/el3_runtime/context_el1.h>
 #include <lib/el3_runtime/context_el2.h>
 #include <lib/el3_runtime/cpu_data.h>
 #include <lib/utils_def.h>
@@ -82,151 +83,10 @@
 #endif /* FFH_SUPPORT */
 
 /*******************************************************************************
- * Constants that allow assembler code to access members of and the
- * 'el1_sys_regs' structure at their correct offsets. Note that some of the
- * registers are only 32-bits wide but are stored as 64-bit values for
- * convenience
- ******************************************************************************/
-#define CTX_EL1_SYSREGS_OFFSET	(CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
-#define CTX_SPSR_EL1		U(0x0)
-#define CTX_ELR_EL1		U(0x8)
-#define CTX_SCTLR_EL1		U(0x10)
-#define CTX_TCR_EL1		U(0x18)
-#define CTX_CPACR_EL1		U(0x20)
-#define CTX_CSSELR_EL1		U(0x28)
-#define CTX_SP_EL1		U(0x30)
-#define CTX_ESR_EL1		U(0x38)
-#define CTX_TTBR0_EL1		U(0x40)
-#define CTX_TTBR1_EL1		U(0x48)
-#define CTX_MAIR_EL1		U(0x50)
-#define CTX_AMAIR_EL1		U(0x58)
-#define CTX_ACTLR_EL1		U(0x60)
-#define CTX_TPIDR_EL1		U(0x68)
-#define CTX_TPIDR_EL0		U(0x70)
-#define CTX_TPIDRRO_EL0		U(0x78)
-#define CTX_PAR_EL1		U(0x80)
-#define CTX_FAR_EL1		U(0x88)
-#define CTX_AFSR0_EL1		U(0x90)
-#define CTX_AFSR1_EL1		U(0x98)
-#define CTX_CONTEXTIDR_EL1	U(0xa0)
-#define CTX_VBAR_EL1		U(0xa8)
-#define CTX_MDCCINT_EL1		U(0xb0)
-#define CTX_MDSCR_EL1		U(0xb8)
-
-#define CTX_AARCH64_END		U(0xc0) /* Align to the next 16 byte boundary */
-
-/*
- * If the platform is AArch64-only, there is no need to save and restore these
- * AArch32 registers.
- */
-#if CTX_INCLUDE_AARCH32_REGS
-#define CTX_SPSR_ABT		(CTX_AARCH64_END + U(0x0))
-#define CTX_SPSR_UND		(CTX_AARCH64_END + U(0x8))
-#define CTX_SPSR_IRQ		(CTX_AARCH64_END + U(0x10))
-#define CTX_SPSR_FIQ		(CTX_AARCH64_END + U(0x18))
-#define CTX_DACR32_EL2		(CTX_AARCH64_END + U(0x20))
-#define CTX_IFSR32_EL2		(CTX_AARCH64_END + U(0x28))
-#define CTX_AARCH32_END		(CTX_AARCH64_END + U(0x30)) /* Align to the next 16 byte boundary */
-#else
-#define CTX_AARCH32_END		CTX_AARCH64_END
-#endif /* CTX_INCLUDE_AARCH32_REGS */
-
-/*
- * If the timer registers aren't saved and restored, we don't have to reserve
- * space for them in the context
- */
-#if NS_TIMER_SWITCH
-#define CTX_CNTP_CTL_EL0	(CTX_AARCH32_END + U(0x0))
-#define CTX_CNTP_CVAL_EL0	(CTX_AARCH32_END + U(0x8))
-#define CTX_CNTV_CTL_EL0	(CTX_AARCH32_END + U(0x10))
-#define CTX_CNTV_CVAL_EL0	(CTX_AARCH32_END + U(0x18))
-#define CTX_CNTKCTL_EL1		(CTX_AARCH32_END + U(0x20))
-#define CTX_TIMER_SYSREGS_END	(CTX_AARCH32_END + U(0x30)) /* Align to the next 16 byte boundary */
-#else
-#define CTX_TIMER_SYSREGS_END	CTX_AARCH32_END
-#endif /* NS_TIMER_SWITCH */
-
-#if ENABLE_FEAT_MTE2
-#define CTX_TFSRE0_EL1		(CTX_TIMER_SYSREGS_END + U(0x0))
-#define CTX_TFSR_EL1		(CTX_TIMER_SYSREGS_END + U(0x8))
-#define CTX_RGSR_EL1		(CTX_TIMER_SYSREGS_END + U(0x10))
-#define CTX_GCR_EL1		(CTX_TIMER_SYSREGS_END + U(0x18))
-#define CTX_MTE_REGS_END	(CTX_TIMER_SYSREGS_END + U(0x20)) /* Align to the next 16 byte boundary */
-#else
-#define CTX_MTE_REGS_END	CTX_TIMER_SYSREGS_END
-#endif /* ENABLE_FEAT_MTE2 */
-
-#if ENABLE_FEAT_RAS
-#define CTX_DISR_EL1		(CTX_MTE_REGS_END + U(0x0))
-#define CTX_RAS_REGS_END	(CTX_MTE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
-#else
-#define CTX_RAS_REGS_END        CTX_MTE_REGS_END
-#endif /* ENABLE_FEAT_RAS */
-
-#if ENABLE_FEAT_S1PIE
-#define CTX_PIRE0_EL1		(CTX_RAS_REGS_END + U(0x0))
-#define CTX_PIR_EL1		(CTX_RAS_REGS_END + U(0x8))
-#define CTX_S1PIE_REGS_END	(CTX_RAS_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
-#else
-#define CTX_S1PIE_REGS_END	CTX_RAS_REGS_END
-#endif /* ENABLE_FEAT_S1PIE */
-
-#if ENABLE_FEAT_S1POE
-#define CTX_POR_EL1		(CTX_S1PIE_REGS_END + U(0x0))
-#define CTX_S1POE_REGS_END	(CTX_S1PIE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
-#else
-#define CTX_S1POE_REGS_END	CTX_S1PIE_REGS_END
-#endif /* ENABLE_FEAT_S1POE */
-
-#if ENABLE_FEAT_S2POE
-#define CTX_S2POR_EL1		(CTX_S1POE_REGS_END + U(0x0))
-#define CTX_S2POE_REGS_END	(CTX_S1POE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
-#else
-#define CTX_S2POE_REGS_END	CTX_S1POE_REGS_END
-#endif /* ENABLE_FEAT_S2POE */
-
-#if ENABLE_FEAT_TCR2
-#define CTX_TCR2_EL1		(CTX_S2POE_REGS_END + U(0x0))
-#define CTX_TCR2_REGS_END	(CTX_S2POE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
-#else
-#define CTX_TCR2_REGS_END       CTX_S2POE_REGS_END
-#endif /* ENABLE_FEAT_TCR2 */
-
-#if ENABLE_TRF_FOR_NS
-#define CTX_TRFCR_EL1		(CTX_TCR2_REGS_END + U(0x0))
-#define CTX_TRF_REGS_END	(CTX_TCR2_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
-#else
-#define CTX_TRF_REGS_END	CTX_TCR2_REGS_END
-#endif /* ENABLE_TRF_FOR_NS */
-
-#if ENABLE_FEAT_CSV2_2
-#define CTX_SCXTNUM_EL0		(CTX_TRF_REGS_END + U(0x0))
-#define CTX_SCXTNUM_EL1		(CTX_TRF_REGS_END + U(0x8))
-#define CTX_CSV2_2_REGS_END	(CTX_TRF_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
-#else
-#define CTX_CSV2_2_REGS_END	CTX_TRF_REGS_END
-#endif /* ENABLE_FEAT_CSV2_2 */
-
-#if ENABLE_FEAT_GCS
-#define CTX_GCSCR_EL1		(CTX_CSV2_2_REGS_END + U(0x0))
-#define CTX_GCSCRE0_EL1		(CTX_CSV2_2_REGS_END + U(0x8))
-#define CTX_GCSPR_EL1		(CTX_CSV2_2_REGS_END + U(0x10))
-#define CTX_GCSPR_EL0		(CTX_CSV2_2_REGS_END + U(0x18))
-#define CTX_GCS_REGS_END	(CTX_CSV2_2_REGS_END + U(0x20)) /* Align to the next 16 byte boundary */
-#else
-#define CTX_GCS_REGS_END	CTX_CSV2_2_REGS_END
-#endif /* ENABLE_FEAT_GCS */
-
-/*
- * End of EL1 system registers.
- */
-#define CTX_EL1_SYSREGS_END	CTX_GCS_REGS_END
-
-/*******************************************************************************
  * Constants that allow assembler code to access members of and the 'fp_regs'
  * structure at their correct offsets.
  ******************************************************************************/
-# define CTX_FPREGS_OFFSET	(CTX_EL1_SYSREGS_OFFSET + CTX_EL1_SYSREGS_END)
+# define CTX_FPREGS_OFFSET	(CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
 #if CTX_INCLUDE_FPREGS
 #define CTX_FP_Q0		U(0x0)
 #define CTX_FP_Q1		U(0x10)
@@ -369,7 +229,6 @@
 
 /* Constants to determine the size of individual context structures */
 #define CTX_GPREG_ALL		(CTX_GPREGS_END >> DWORD_SHIFT)
-#define CTX_EL1_SYSREGS_ALL	(CTX_EL1_SYSREGS_END >> DWORD_SHIFT)
 
 #if CTX_INCLUDE_FPREGS
 # define CTX_FPREG_ALL		(CTX_FPREGS_END >> DWORD_SHIFT)
@@ -394,12 +253,6 @@
 DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
 
 /*
- * AArch64 EL1 system register context structure for preserving the
- * architectural state during world switches.
- */
-DEFINE_REG_STRUCT(el1_sysregs, CTX_EL1_SYSREGS_ALL);
-
-/*
  * AArch64 floating point register context structure for preserving
  * the floating point state during switches from one security state to
  * another.
@@ -446,7 +299,6 @@
 typedef struct cpu_context {
 	gp_regs_t gpregs_ctx;
 	el3_state_t el3state_ctx;
-	el1_sysregs_t el1_sysregs_ctx;
 
 #if CTX_INCLUDE_FPREGS
 	fp_regs_t fpregs_ctx;
@@ -461,6 +313,8 @@
 	pauth_t pauth_ctx;
 #endif
 
+	el1_sysregs_t el1_sysregs_ctx;
+
 #if CTX_INCLUDE_EL2_REGS
 	el2_sysregs_t el2_sysregs_ctx;
 #endif
@@ -510,9 +364,6 @@
 CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx),
 	assert_core_context_el3state_offset_mismatch);
 
-CASSERT(CTX_EL1_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, el1_sysregs_ctx),
-	assert_core_context_el1_sys_offset_mismatch);
-
 #if CTX_INCLUDE_FPREGS
 CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx),
 	assert_core_context_fp_offset_mismatch);
diff --git a/include/lib/el3_runtime/context_el1.h b/include/lib/el3_runtime/context_el1.h
new file mode 100644
index 0000000..038de25
--- /dev/null
+++ b/include/lib/el3_runtime/context_el1.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CONTEXT_EL1_H
+#define CONTEXT_EL1_H
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * EL1 Registers:
+ * AArch64 EL1 system register context structure for preserving the
+ * architectural state during world switches.
+ ******************************************************************************/
+
+typedef struct el1_common_regs {
+	uint64_t spsr_el1;
+	uint64_t elr_el1;
+
+#if (!ERRATA_SPECULATIVE_AT)
+	uint64_t sctlr_el1;
+	uint64_t tcr_el1;
+#endif /* ERRATA_SPECULATIVE_AT=0 */
+
+	uint64_t cpacr_el1;
+	uint64_t csselr_el1;
+	uint64_t sp_el1;
+	uint64_t esr_el1;
+	uint64_t ttbr0_el1;
+	uint64_t ttbr1_el1;
+	uint64_t mair_el1;
+	uint64_t amair_el1;
+	uint64_t actlr_el1;
+	uint64_t tpidr_el1;
+	uint64_t tpidr_el0;
+	uint64_t tpidrro_el0;
+	uint64_t par_el1;
+	uint64_t far_el1;
+	uint64_t afsr0_el1;
+	uint64_t afsr1_el1;
+	uint64_t contextidr_el1;
+	uint64_t vbar_el1;
+	uint64_t mdccint_el1;
+	uint64_t mdscr_el1;
+} el1_common_regs_t;
+
+typedef struct el1_aarch32_regs {
+	uint64_t spsr_abt;
+	uint64_t spsr_und;
+	uint64_t spsr_irq;
+	uint64_t spsr_fiq;
+	uint64_t dacr32_el2;
+	uint64_t ifsr32_el2;
+} el1_aarch32_regs_t;
+
+typedef struct el1_arch_timer_regs {
+	uint64_t cntp_ctl_el0;
+	uint64_t cntp_cval_el0;
+	uint64_t cntv_ctl_el0;
+	uint64_t cntv_cval_el0;
+	uint64_t cntkctl_el1;
+} el1_arch_timer_regs_t;
+
+typedef struct el1_mte2_regs {
+	uint64_t tfsre0_el1;
+	uint64_t tfsr_el1;
+	uint64_t rgsr_el1;
+	uint64_t gcr_el1;
+} el1_mte2_regs_t;
+
+typedef struct el1_ras_regs {
+	uint64_t disr_el1;
+} el1_ras_regs_t;
+
+typedef struct el1_s1pie_regs {
+	uint64_t pire0_el1;
+	uint64_t pir_el1;
+} el1_s1pie_regs_t;
+
+typedef struct el1_s1poe_regs {
+	uint64_t por_el1;
+} el1_s1poe_regs_t;
+
+typedef struct el1_s2poe_regs {
+	uint64_t s2por_el1;
+} el1_s2poe_regs_t;
+
+typedef struct el1_tcr2_regs {
+	uint64_t tcr2_el1;
+} el1_tcr2_regs_t;
+
+typedef struct el1_trf_regs {
+	uint64_t trfcr_el1;
+} el1_trf_regs_t;
+
+typedef struct el1_csv2_2_regs {
+	uint64_t scxtnum_el0;
+	uint64_t scxtnum_el1;
+} el1_csv2_2_regs_t;
+
+typedef struct el1_gcs_regs {
+	uint64_t gcscr_el1;
+	uint64_t gcscre0_el1;
+	uint64_t gcspr_el1;
+	uint64_t gcspr_el0;
+} el1_gcs_regs_t;
+
+typedef struct el1_sysregs {
+
+	el1_common_regs_t common;
+
+#if CTX_INCLUDE_AARCH32_REGS
+	el1_aarch32_regs_t el1_aarch32;
+#endif
+
+#if NS_TIMER_SWITCH
+	el1_arch_timer_regs_t arch_timer;
+#endif
+
+#if ENABLE_FEAT_MTE2
+	el1_mte2_regs_t mte2;
+#endif
+
+#if ENABLE_FEAT_RAS
+	el1_ras_regs_t ras;
+#endif
+
+#if ENABLE_FEAT_S1PIE
+	el1_s1pie_regs_t s1pie;
+#endif
+
+#if ENABLE_FEAT_S1POE
+	el1_s1poe_regs_t s1poe;
+#endif
+
+#if ENABLE_FEAT_S2POE
+	el1_s2poe_regs_t s2poe;
+#endif
+
+#if ENABLE_FEAT_TCR2
+	el1_tcr2_regs_t tcr2;
+#endif
+
+#if ENABLE_TRF_FOR_NS
+	el1_trf_regs_t trf;
+#endif
+
+#if ENABLE_FEAT_CSV2_2
+	el1_csv2_2_regs_t csv2_2;
+#endif
+
+#if ENABLE_FEAT_GCS
+	el1_gcs_regs_t gcs;
+#endif
+
+} el1_sysregs_t;
+
+
+/*
+ * Macros to access members related to individual features of the
+ * el1_sysregs_t structure.
+ */
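+
+/*
+ * For example, given a cpu_context_t pointer 'ctx' (hypothetical snippet;
+ * get_el1_sysregs_ctx() is provided by context.h and read_elr_el1() by
+ * arch_helpers.h):
+ *
+ *     el1_sysregs_t *el1 = get_el1_sysregs_ctx(ctx);
+ *     write_el1_ctx_common(el1, elr_el1, read_elr_el1());
+ *     uint64_t elr = read_el1_ctx_common(el1, elr_el1);
+ */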
+
+#define read_el1_ctx_common(ctx, reg)		(((ctx)->common).reg)
+
+#define write_el1_ctx_common(ctx, reg, val)	((((ctx)->common).reg)	\
+							= (uint64_t) (val))
+
+#if NS_TIMER_SWITCH
+#define read_el1_ctx_arch_timer(ctx, reg)		(((ctx)->arch_timer).reg)
+#define write_el1_ctx_arch_timer(ctx, reg, val)	((((ctx)->arch_timer).reg)	\
+							= (uint64_t) (val))
+#else
+#define read_el1_ctx_arch_timer(ctx, reg)		ULL(0)
+#define write_el1_ctx_arch_timer(ctx, reg, val)
+#endif /* NS_TIMER_SWITCH */
+
+#if CTX_INCLUDE_AARCH32_REGS
+#define read_el1_ctx_aarch32(ctx, reg)		(((ctx)->el1_aarch32).reg)
+#define write_el1_ctx_aarch32(ctx, reg, val)	((((ctx)->el1_aarch32).reg)	\
+							= (uint64_t) (val))
+#else
+#define read_el1_ctx_aarch32(ctx, reg)		ULL(0)
+#define write_el1_ctx_aarch32(ctx, reg, val)
+#endif /* CTX_INCLUDE_AARCH32_REGS */
+
+#if ENABLE_FEAT_MTE2
+#define read_el1_ctx_mte2(ctx, reg)		(((ctx)->mte2).reg)
+#define write_el1_ctx_mte2(ctx, reg, val)	((((ctx)->mte2).reg)	\
+							= (uint64_t) (val))
+#else
+#define read_el1_ctx_mte2(ctx, reg)		ULL(0)
+#define write_el1_ctx_mte2(ctx, reg, val)
+#endif /* ENABLE_FEAT_MTE2 */
+
+#if ENABLE_FEAT_RAS
+#define read_el1_ctx_ras(ctx, reg)		(((ctx)->ras).reg)
+#define write_el1_ctx_ras(ctx, reg, val)	((((ctx)->ras).reg)	\
+							= (uint64_t) (val))
+#else
+#define read_el1_ctx_ras(ctx, reg)		ULL(0)
+#define write_el1_ctx_ras(ctx, reg, val)
+#endif /* ENABLE_FEAT_RAS */
+
+#if ENABLE_FEAT_S1PIE
+#define read_el1_ctx_s1pie(ctx, reg)		(((ctx)->s1pie).reg)
+#define write_el1_ctx_s1pie(ctx, reg, val)	((((ctx)->s1pie).reg)	\
+							= (uint64_t) (val))
+#else
+#define read_el1_ctx_s1pie(ctx, reg)		ULL(0)
+#define write_el1_ctx_s1pie(ctx, reg, val)
+#endif /* ENABLE_FEAT_S1PIE */
+
+#if ENABLE_FEAT_S1POE
+#define read_el1_ctx_s1poe(ctx, reg)		(((ctx)->s1poe).reg)
+#define write_el1_ctx_s1poe(ctx, reg, val)	((((ctx)->s1poe).reg)	\
+							= (uint64_t) (val))
+#else
+#define read_el1_ctx_s1poe(ctx, reg)		ULL(0)
+#define write_el1_ctx_s1poe(ctx, reg, val)
+#endif /* ENABLE_FEAT_S1POE */
+
+#if ENABLE_FEAT_S2POE
+#define read_el1_ctx_s2poe(ctx, reg)		(((ctx)->s2poe).reg)
+#define write_el1_ctx_s2poe(ctx, reg, val)	((((ctx)->s2poe).reg)	\
+							= (uint64_t) (val))
+#else
+#define read_el1_ctx_s2poe(ctx, reg)		ULL(0)
+#define write_el1_ctx_s2poe(ctx, reg, val)
+#endif /* ENABLE_FEAT_S2POE */
+
+#if ENABLE_FEAT_TCR2
+#define read_el1_ctx_tcr2(ctx, reg)		(((ctx)->tcr2).reg)
+#define write_el1_ctx_tcr2(ctx, reg, val)	((((ctx)->tcr2).reg)	\
+							= (uint64_t) (val))
+#else
+#define read_el1_ctx_tcr2(ctx, reg)		ULL(0)
+#define write_el1_ctx_tcr2(ctx, reg, val)
+#endif /* ENABLE_FEAT_TCR2 */
+
+#if ENABLE_TRF_FOR_NS
+#define read_el1_ctx_trf(ctx, reg)		(((ctx)->trf).reg)
+#define write_el1_ctx_trf(ctx, reg, val)	((((ctx)->trf).reg)	\
+							= (uint64_t) (val))
+#else
+#define read_el1_ctx_trf(ctx, reg)		ULL(0)
+#define write_el1_ctx_trf(ctx, reg, val)
+#endif /* ENABLE_TRF_FOR_NS */
+
+#if ENABLE_FEAT_CSV2_2
+#define read_el1_ctx_csv2_2(ctx, reg)		(((ctx)->csv2_2).reg)
+#define write_el1_ctx_csv2_2(ctx, reg, val)	((((ctx)->csv2_2).reg)	\
+							= (uint64_t) (val))
+#else
+#define read_el1_ctx_csv2_2(ctx, reg)		ULL(0)
+#define write_el1_ctx_csv2_2(ctx, reg, val)
+#endif /* ENABLE_FEAT_CSV2_2 */
+
+#if ENABLE_FEAT_GCS
+#define read_el1_ctx_gcs(ctx, reg)		(((ctx)->gcs).reg)
+#define write_el1_ctx_gcs(ctx, reg, val)	((((ctx)->gcs).reg)	\
+							= (uint64_t) (val))
+#else
+#define read_el1_ctx_gcs(ctx, reg)		ULL(0)
+#define write_el1_ctx_gcs(ctx, reg, val)
+#endif /* ENABLE_FEAT_GCS */
+/******************************************************************************/
+#endif /* __ASSEMBLER__ */
+
+#endif /* CONTEXT_EL1_H */