Merge pull request #69 from sandrine-bailleux:sb/split-mmu-fcts-per-el
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index 97d54f6..aeb54bc 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -203,7 +203,7 @@
 	 * Initialise the MMU
 	 * ---------------------------------------------
 	 */
-	bl	enable_mmu
+	bl	enable_mmu_el1
 
 	/* ---------------------------------------------
 	 * Give ourselves a stack allocated in Normal
diff --git a/common/bl_common.c b/common/bl_common.c
index 5361c38..86b0cc5 100644
--- a/common/bl_common.c
+++ b/common/bl_common.c
@@ -106,9 +106,7 @@
  */
 void __dead2 change_el(el_change_info_t *info)
 {
-	unsigned long current_el = read_current_el();
-
-	if (GET_EL(current_el) == MODE_EL3) {
+	if (IS_IN_EL3()) {
 		/*
 		 * We can go anywhere from EL3. So find where.
 		 * TODO: Lots to do if we are going non-secure.
@@ -551,7 +549,6 @@
 		       void *second_arg)
 {
 	el_change_info_t run_image_info;
-	unsigned long current_el = read_current_el();
 
 	/* Tell next EL what we want done */
 	run_image_info.args.arg0 = RUN_IMAGE;
@@ -565,7 +562,7 @@
 	 * to jump to a higher EL and issue an SMC. Contents of argY
 	 * will go into the general purpose register xY e.g. arg0->x0
 	 */
-	if (GET_EL(current_el) == MODE_EL3) {
+	if (IS_IN_EL3()) {
 		run_image_info.args.arg1 = (unsigned long) first_arg;
 		run_image_info.args.arg2 = (unsigned long) second_arg;
 	} else {
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 517e25a..67b452b 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -264,5 +264,10 @@
 extern void write_cptr_el2(unsigned long);
 extern void write_cptr_el3(unsigned long);
 
+#define IS_IN_EL(x) \
+	(GET_EL(read_current_el()) == MODE_EL##x)
+
+#define IS_IN_EL1() IS_IN_EL(1)
+#define IS_IN_EL3() IS_IN_EL(3)
 
 #endif /* __ARCH_HELPERS_H__ */
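A note on the new helpers: IS_IN_EL(x) token-pastes the requested level onto MODE_EL and compares it against the EL field of CurrentEL as returned by read_current_el(). A minimal usage sketch (illustrative only, not part of the patch):

	/* Compile-time pasting: IS_IN_EL(3) -> GET_EL(read_current_el()) == MODE_EL3 */
	assert(IS_IN_EL(3));

	/* Run-time checks read the same way as the code they replace in bl_common.c */
	if (IS_IN_EL3()) {
		/* EL3-only path, previously guarded by GET_EL(current_el) == MODE_EL3 */
	}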
diff --git a/plat/fvp/aarch64/plat_common.c b/plat/fvp/aarch64/plat_common.c
index edeb6e0..099751d 100644
--- a/plat/fvp/aarch64/plat_common.c
+++ b/plat/fvp/aarch64/plat_common.c
@@ -47,81 +47,68 @@
 static unsigned long platform_config[CONFIG_LIMIT];
 
 /*******************************************************************************
- * Enable the MMU assuming that the pagetables have already been created
- *******************************************************************************/
-void enable_mmu()
-{
-	unsigned long mair, tcr, ttbr, sctlr;
-	unsigned long current_el = read_current_el();
-
-	/* Set the attributes in the right indices of the MAIR */
-	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
-	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
-				  ATTR_IWBWA_OWBWA_NTR_INDEX);
-
-	/*
-	 * Set TCR bits as well. Inner & outer WBWA & shareable + T0SZ = 32
-	 */
-	tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |
-		  TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;
-
-	/* Set TTBR bits as well */
-	ttbr = (unsigned long) l1_xlation_table;
-
-	if (GET_EL(current_el) == MODE_EL3) {
-		assert((read_sctlr_el3() & SCTLR_M_BIT) == 0);
-
-		write_mair_el3(mair);
-		tcr |= TCR_EL3_RES1;
-		/* Invalidate EL3 TLBs */
-		tlbialle3();
-
-		write_tcr_el3(tcr);
-		write_ttbr0_el3(ttbr);
-
-		/* ensure all translation table writes have drained into memory,
-		 * the TLB invalidation is complete, and translation register
-		 * writes are committed before enabling the MMU
-		 */
-		dsb();
-		isb();
-
-		sctlr = read_sctlr_el3();
-		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
-		sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
-		write_sctlr_el3(sctlr);
-	} else {
-		assert((read_sctlr_el1() & SCTLR_M_BIT) == 0);
-
-		write_mair_el1(mair);
-		/* Invalidate EL1 TLBs */
-		tlbivmalle1();
-
-		write_tcr_el1(tcr);
-		write_ttbr0_el1(ttbr);
-
-		/* ensure all translation table writes have drained into memory,
-		 * the TLB invalidation is complete, and translation register
-		 * writes are committed before enabling the MMU
-		 */
-		dsb();
-		isb();
-
-		sctlr = read_sctlr_el1();
-		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
-		sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
-		write_sctlr_el1(sctlr);
+ * Macro generating the code for the function enabling the MMU in the given
+ * exception level, assuming that the pagetables have already been created.
+ *
+ *   _el:		Exception level at which the function will run
+ *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
+ *			be OR'ed with the default TCR value.
+ *   _tlbi_fct:		Function to invalidate the TLBs at the current
+ *			exception level
+ ******************************************************************************/
+#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
+	void enable_mmu_el##_el(void)					\
+	{								\
+		uint64_t mair, tcr, ttbr;				\
+		uint32_t sctlr;						\
+									\
+		assert(IS_IN_EL(_el));					\
+		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
+									\
+		/* Set attributes in the right indices of the MAIR */	\
+		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
+		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
+				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
+		write_mair_el##_el(mair);				\
+									\
+		/* Invalidate TLBs at the current exception level */	\
+		_tlbi_fct();						\
+									\
+		/* Set TCR bits as well. */				\
+		/* Inner & outer WBWA & shareable + T0SZ = 32 */	\
+		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |	\
+			TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;		\
+		tcr |= _tcr_extra;					\
+		write_tcr_el##_el(tcr);					\
+									\
+		/* Set TTBR bits as well */				\
+		ttbr = (uint64_t) l1_xlation_table;			\
+		write_ttbr0_el##_el(ttbr);				\
+									\
+		/* Ensure all translation table writes have drained */	\
+		/* into memory, the TLB invalidation is complete, */	\
+		/* and translation register writes are committed */	\
+		/* before enabling the MMU */				\
+		dsb();							\
+		isb();							\
+									\
+		sctlr = read_sctlr_el##_el();				\
+		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;	\
+		sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;			\
+		write_sctlr_el##_el(sctlr);				\
+									\
+		/* Ensure the MMU enable takes effect immediately */	\
+		isb();							\
 	}
-	/* ensure the MMU enable takes effect immediately */
-	isb();
 
-	return;
-}
+/* Define EL1 and EL3 variants of the function enabling the MMU */
+DEFINE_ENABLE_MMU_EL(1, 0, tlbivmalle1)
+DEFINE_ENABLE_MMU_EL(3, TCR_EL3_RES1, tlbialle3)
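For clarity, the EL3 instantiation above expands to roughly the following function (abridged, whitespace tidied); the only per-EL differences are the _el-suffixed register accessors, the extra TCR bits and the TLB invalidation helper passed as macro arguments:

	void enable_mmu_el3(void)
	{
		uint64_t mair, tcr, ttbr;
		uint32_t sctlr;

		assert(IS_IN_EL(3));
		assert((read_sctlr_el3() & SCTLR_M_BIT) == 0);

		/* Set attributes in the right indices of the MAIR */
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
		write_mair_el3(mair);

		/* Invalidate EL3 TLBs (the _tlbi_fct argument) */
		tlbialle3();

		/* Inner & outer WBWA & shareable + T0SZ = 32, plus _tcr_extra */
		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |
			TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;
		tcr |= TCR_EL3_RES1;
		write_tcr_el3(tcr);

		ttbr = (uint64_t) l1_xlation_table;
		write_ttbr0_el3(ttbr);

		/* Drain table writes and complete TLB invalidation before enabling */
		dsb();
		isb();

		sctlr = read_sctlr_el3();
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
		sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
		write_sctlr_el3(sctlr);

		/* Ensure the MMU enable takes effect immediately */
		isb();
	}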
 
 /*
  * Table of regions to map using the MMU.
- * This doesn't include TZRAM as the 'mem_layout' argument passed to to
- * configure_mmu() will give the available subset of that,
+ * This doesn't include TZRAM as the 'mem_layout' argument passed to
+ * configure_mmu_elx() will give the available subset of that.
  */
 const mmap_region_t fvp_mmap[] = {
 	{ TZROM_BASE,	TZROM_SIZE,	MT_MEMORY | MT_RO | MT_SECURE },
@@ -139,28 +126,32 @@
 };
 
 /*******************************************************************************
- * Setup the pagetables as per the platform memory map & initialize the mmu
- *******************************************************************************/
-void configure_mmu(meminfo_t *mem_layout,
-		   unsigned long ro_start,
-		   unsigned long ro_limit,
-		   unsigned long coh_start,
-		   unsigned long coh_limit)
-{
-	mmap_add_region(mem_layout->total_base, mem_layout->total_size,
-				MT_MEMORY | MT_RW | MT_SECURE);
-	mmap_add_region(ro_start, ro_limit - ro_start,
-				MT_MEMORY | MT_RO | MT_SECURE);
-	mmap_add_region(coh_start, coh_limit - coh_start,
-				MT_DEVICE | MT_RW | MT_SECURE);
-
-	mmap_add(fvp_mmap);
-
-	init_xlat_tables();
+ * Macro generating the code for the function setting up the pagetables as per
+ * the platform memory map & initialising the MMU, for the given exception level
+ ******************************************************************************/
+#define DEFINE_CONFIGURE_MMU_EL(_el)					\
+	void configure_mmu_el##_el(meminfo_t *mem_layout,		\
+				   unsigned long ro_start,		\
+				   unsigned long ro_limit,		\
+				   unsigned long coh_start,		\
+				   unsigned long coh_limit)		\
+	{								\
+		mmap_add_region(mem_layout->total_base,			\
+				mem_layout->total_size,			\
+				MT_MEMORY | MT_RW | MT_SECURE);		\
+		mmap_add_region(ro_start, ro_limit - ro_start,		\
+				MT_MEMORY | MT_RO | MT_SECURE);		\
+		mmap_add_region(coh_start, coh_limit - coh_start,	\
+				MT_DEVICE | MT_RW | MT_SECURE);		\
+		mmap_add(fvp_mmap);					\
+		init_xlat_tables();					\
+									\
+		enable_mmu_el##_el();					\
+	}
 
-	enable_mmu();
-	return;
-}
+/* Define EL1 and EL3 variants of the function initialising the MMU */
+DEFINE_CONFIGURE_MMU_EL(1)
+DEFINE_CONFIGURE_MMU_EL(3)
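Likewise, DEFINE_CONFIGURE_MMU_EL(1) expands to a configure_mmu_el1() whose body matches the old configure_mmu() apart from the final, EL-specific call (sketch for illustration):

	void configure_mmu_el1(meminfo_t *mem_layout,
			       unsigned long ro_start,
			       unsigned long ro_limit,
			       unsigned long coh_start,
			       unsigned long coh_limit)
	{
		/* Image memory layout (RW), its read-only section and its
		 * coherent (device) region, plus the static FVP regions */
		mmap_add_region(mem_layout->total_base, mem_layout->total_size,
				MT_MEMORY | MT_RW | MT_SECURE);
		mmap_add_region(ro_start, ro_limit - ro_start,
				MT_MEMORY | MT_RO | MT_SECURE);
		mmap_add_region(coh_start, coh_limit - coh_start,
				MT_DEVICE | MT_RW | MT_SECURE);
		mmap_add(fvp_mmap);
		init_xlat_tables();

		enable_mmu_el1();	/* the only EL-dependent step */
	}

As the call-site updates below show, BL1 and BL31 (running at EL3) now use the _el3 variants, while BL2 and BL32 (running at Secure EL1) use the _el1 ones.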
 
 /* Simple routine which returns a configuration variable value */
 unsigned long platform_get_cfgvar(unsigned int var_id)
diff --git a/plat/fvp/bl1_plat_setup.c b/plat/fvp/bl1_plat_setup.c
index fd03ec2..edd3f7b 100644
--- a/plat/fvp/bl1_plat_setup.c
+++ b/plat/fvp/bl1_plat_setup.c
@@ -138,9 +138,9 @@
 		cci_enable_coherency(read_mpidr());
 	}
 
-	configure_mmu(&bl1_tzram_layout,
-			TZROM_BASE,
-			TZROM_BASE + TZROM_SIZE,
-			BL1_COHERENT_RAM_BASE,
-			BL1_COHERENT_RAM_LIMIT);
+	configure_mmu_el3(&bl1_tzram_layout,
+			  TZROM_BASE,
+			  TZROM_BASE + TZROM_SIZE,
+			  BL1_COHERENT_RAM_BASE,
+			  BL1_COHERENT_RAM_LIMIT);
 }
diff --git a/plat/fvp/bl2_plat_setup.c b/plat/fvp/bl2_plat_setup.c
index 4c649eb..80bb52e 100644
--- a/plat/fvp/bl2_plat_setup.c
+++ b/plat/fvp/bl2_plat_setup.c
@@ -172,9 +172,9 @@
  ******************************************************************************/
 void bl2_plat_arch_setup()
 {
-	configure_mmu(&bl2_tzram_layout,
-		      BL2_RO_BASE,
-		      BL2_RO_LIMIT,
-		      BL2_COHERENT_RAM_BASE,
-		      BL2_COHERENT_RAM_LIMIT);
+	configure_mmu_el1(&bl2_tzram_layout,
+			  BL2_RO_BASE,
+			  BL2_RO_LIMIT,
+			  BL2_COHERENT_RAM_BASE,
+			  BL2_COHERENT_RAM_LIMIT);
 }
diff --git a/plat/fvp/bl31_plat_setup.c b/plat/fvp/bl31_plat_setup.c
index 5c00baa..baf7df1 100644
--- a/plat/fvp/bl31_plat_setup.c
+++ b/plat/fvp/bl31_plat_setup.c
@@ -172,9 +172,9 @@
  ******************************************************************************/
 void bl31_plat_arch_setup()
 {
-	configure_mmu(&bl2_to_bl31_args->bl31_meminfo,
-		      BL31_RO_BASE,
-		      BL31_RO_LIMIT,
-		      BL31_COHERENT_RAM_BASE,
-		      BL31_COHERENT_RAM_LIMIT);
+	configure_mmu_el3(&bl2_to_bl31_args->bl31_meminfo,
+			  BL31_RO_BASE,
+			  BL31_RO_LIMIT,
+			  BL31_COHERENT_RAM_BASE,
+			  BL31_COHERENT_RAM_LIMIT);
 }
diff --git a/plat/fvp/bl32_plat_setup.c b/plat/fvp/bl32_plat_setup.c
index 9fe8fe1..bb2b602 100644
--- a/plat/fvp/bl32_plat_setup.c
+++ b/plat/fvp/bl32_plat_setup.c
@@ -111,9 +111,9 @@
  ******************************************************************************/
 void bl32_plat_arch_setup()
 {
-	configure_mmu(&bl32_tzdram_layout,
-		      BL32_RO_BASE,
-		      BL32_RO_LIMIT,
-		      BL32_COHERENT_RAM_BASE,
-		      BL32_COHERENT_RAM_LIMIT);
+	configure_mmu_el1(&bl32_tzdram_layout,
+			  BL32_RO_BASE,
+			  BL32_RO_LIMIT,
+			  BL32_COHERENT_RAM_BASE,
+			  BL32_COHERENT_RAM_LIMIT);
 }
diff --git a/plat/fvp/platform.h b/plat/fvp/platform.h
index 6c28a14..40f780e 100644
--- a/plat/fvp/platform.h
+++ b/plat/fvp/platform.h
@@ -373,12 +373,18 @@
 extern void bl31_plat_arch_setup(void);
 extern int platform_setup_pm(const struct plat_pm_ops **);
 extern unsigned int platform_get_core_pos(unsigned long mpidr);
-extern void enable_mmu(void);
-extern void configure_mmu(struct meminfo *,
-			  unsigned long,
-			  unsigned long,
-			  unsigned long,
-			  unsigned long);
+extern void enable_mmu_el1(void);
+extern void enable_mmu_el3(void);
+extern void configure_mmu_el1(struct meminfo *mem_layout,
+			      unsigned long ro_start,
+			      unsigned long ro_limit,
+			      unsigned long coh_start,
+			      unsigned long coh_limit);
+extern void configure_mmu_el3(struct meminfo *mem_layout,
+			      unsigned long ro_start,
+			      unsigned long ro_limit,
+			      unsigned long coh_start,
+			      unsigned long coh_limit);
 extern unsigned long platform_get_cfgvar(unsigned int);
 extern int platform_config_setup(void);
 extern void plat_report_exception(unsigned long);
diff --git a/services/std_svc/psci/psci_afflvl_on.c b/services/std_svc/psci/psci_afflvl_on.c
index 8f9bb4d..9f4ebf6 100644
--- a/services/std_svc/psci/psci_afflvl_on.c
+++ b/services/std_svc/psci/psci_afflvl_on.c
@@ -362,7 +362,7 @@
 	/*
 	 * Arch. management: Turn on mmu & restore architectural state
 	 */
-	enable_mmu();
+	enable_mmu_el3();
 
 	/*
 	 * All the platform specific actions for turning this cpu