Merge pull request #71 from sandrine-bailleux:sb/fix-tsp-fvp-makefile
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index 97d54f6..aeb54bc 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -203,7 +203,7 @@
* Initialise the MMU
* ---------------------------------------------
*/
- bl enable_mmu
+ bl enable_mmu_el1
/* ---------------------------------------------
* Give ourselves a stack allocated in Normal
diff --git a/common/bl_common.c b/common/bl_common.c
index 5361c38..86b0cc5 100644
--- a/common/bl_common.c
+++ b/common/bl_common.c
@@ -106,9 +106,7 @@
*/
void __dead2 change_el(el_change_info_t *info)
{
- unsigned long current_el = read_current_el();
-
- if (GET_EL(current_el) == MODE_EL3) {
+ if (IS_IN_EL3()) {
/*
* We can go anywhere from EL3. So find where.
* TODO: Lots to do if we are going non-secure.
@@ -551,7 +549,6 @@
void *second_arg)
{
el_change_info_t run_image_info;
- unsigned long current_el = read_current_el();
/* Tell next EL what we want done */
run_image_info.args.arg0 = RUN_IMAGE;
@@ -565,7 +562,7 @@
* to jump to a higher EL and issue an SMC. Contents of argY
* will go into the general purpose register xY e.g. arg0->x0
*/
- if (GET_EL(current_el) == MODE_EL3) {
+ if (IS_IN_EL3()) {
run_image_info.args.arg1 = (unsigned long) first_arg;
run_image_info.args.arg2 = (unsigned long) second_arg;
} else {
diff --git a/docs/porting-guide.md b/docs/porting-guide.md
index 8a024d8..e967b0e 100644
--- a/docs/porting-guide.md
+++ b/docs/porting-guide.md
@@ -631,8 +631,10 @@
the `bl31_args` structure pointed to by `bl2_to_bl31_args`.
Platform security components are configured if required. For the Base FVP the
-TZC-400 TrustZone controller is configured to grant secure and non-secure access
-to DRAM.
+TZC-400 TrustZone controller is configured to only grant non-secure access
+to DRAM. This avoids aliasing between secure and non-secure accesses in the
+TLB and cache - secure execution states can use the NS attributes in the
+MMU translation tables to access the DRAM.
This function is also responsible for initializing the storage abstraction layer
which is used to load further bootloader images.
diff --git a/drivers/arm/gic/gic_v2.c b/drivers/arm/gic/gic_v2.c
index b011900..00464cb 100644
--- a/drivers/arm/gic/gic_v2.c
+++ b/drivers/arm/gic/gic_v2.c
@@ -214,49 +214,43 @@
void gicd_set_isenabler(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << ISENABLER_SHIFT) - 1);
- unsigned int reg_val = gicd_read_isenabler(base, id);
- gicd_write_isenabler(base, id, reg_val | (1 << bit_num));
+ gicd_write_isenabler(base, id, (1 << bit_num));
}
void gicd_set_icenabler(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << ICENABLER_SHIFT) - 1);
- unsigned int reg_val = gicd_read_icenabler(base, id);
- gicd_write_icenabler(base, id, reg_val & ~(1 << bit_num));
+ gicd_write_icenabler(base, id, (1 << bit_num));
}
void gicd_set_ispendr(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << ISPENDR_SHIFT) - 1);
- unsigned int reg_val = gicd_read_ispendr(base, id);
- gicd_write_ispendr(base, id, reg_val | (1 << bit_num));
+ gicd_write_ispendr(base, id, (1 << bit_num));
}
void gicd_set_icpendr(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << ICPENDR_SHIFT) - 1);
- unsigned int reg_val = gicd_read_icpendr(base, id);
- gicd_write_icpendr(base, id, reg_val & ~(1 << bit_num));
+ gicd_write_icpendr(base, id, (1 << bit_num));
}
void gicd_set_isactiver(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << ISACTIVER_SHIFT) - 1);
- unsigned int reg_val = gicd_read_isactiver(base, id);
- gicd_write_isactiver(base, id, reg_val | (1 << bit_num));
+ gicd_write_isactiver(base, id, (1 << bit_num));
}
void gicd_set_icactiver(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << ICACTIVER_SHIFT) - 1);
- unsigned int reg_val = gicd_read_icactiver(base, id);
- gicd_write_icactiver(base, id, reg_val & ~(1 << bit_num));
+ gicd_write_icactiver(base, id, (1 << bit_num));
}
/*
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 517e25a..67b452b 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -264,5 +264,10 @@
extern void write_cptr_el2(unsigned long);
extern void write_cptr_el3(unsigned long);
+#define IS_IN_EL(x) \
+ (GET_EL(read_current_el()) == MODE_EL##x)
+
+#define IS_IN_EL1() IS_IN_EL(1)
+#define IS_IN_EL3() IS_IN_EL(3)
#endif /* __ARCH_HELPERS_H__ */
diff --git a/plat/fvp/aarch64/plat_common.c b/plat/fvp/aarch64/plat_common.c
index edeb6e0..099751d 100644
--- a/plat/fvp/aarch64/plat_common.c
+++ b/plat/fvp/aarch64/plat_common.c
@@ -47,81 +47,68 @@
static unsigned long platform_config[CONFIG_LIMIT];
/*******************************************************************************
- * Enable the MMU assuming that the pagetables have already been created
- *******************************************************************************/
-void enable_mmu()
-{
- unsigned long mair, tcr, ttbr, sctlr;
- unsigned long current_el = read_current_el();
-
- /* Set the attributes in the right indices of the MAIR */
- mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
- mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
- ATTR_IWBWA_OWBWA_NTR_INDEX);
-
- /*
- * Set TCR bits as well. Inner & outer WBWA & shareable + T0SZ = 32
- */
- tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |
- TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;
-
- /* Set TTBR bits as well */
- ttbr = (unsigned long) l1_xlation_table;
-
- if (GET_EL(current_el) == MODE_EL3) {
- assert((read_sctlr_el3() & SCTLR_M_BIT) == 0);
-
- write_mair_el3(mair);
- tcr |= TCR_EL3_RES1;
- /* Invalidate EL3 TLBs */
- tlbialle3();
-
- write_tcr_el3(tcr);
- write_ttbr0_el3(ttbr);
-
- /* ensure all translation table writes have drained into memory,
- * the TLB invalidation is complete, and translation register
- * writes are committed before enabling the MMU
- */
- dsb();
- isb();
-
- sctlr = read_sctlr_el3();
- sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
- sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
- write_sctlr_el3(sctlr);
- } else {
- assert((read_sctlr_el1() & SCTLR_M_BIT) == 0);
-
- write_mair_el1(mair);
- /* Invalidate EL1 TLBs */
- tlbivmalle1();
-
- write_tcr_el1(tcr);
- write_ttbr0_el1(ttbr);
-
- /* ensure all translation table writes have drained into memory,
- * the TLB invalidation is complete, and translation register
- * writes are committed before enabling the MMU
- */
- dsb();
- isb();
-
- sctlr = read_sctlr_el1();
- sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
- sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
- write_sctlr_el1(sctlr);
+ * Macro generating the code for the function enabling the MMU in the given
+ * exception level, assuming that the pagetables have already been created.
+ *
+ * _el: Exception level at which the function will run
+ * _tcr_extra: Extra bits to set in the TCR register. This mask will
+ * be OR'ed with the default TCR value.
+ * _tlbi_fct: Function to invalidate the TLBs at the current
+ * exception level
+ ******************************************************************************/
+#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct) \
+ void enable_mmu_el##_el(void) \
+ { \
+ uint64_t mair, tcr, ttbr; \
+ uint32_t sctlr; \
+ \
+ assert(IS_IN_EL(_el)); \
+ assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0); \
+ \
+ /* Set attributes in the right indices of the MAIR */ \
+ mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); \
+ mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, \
+ ATTR_IWBWA_OWBWA_NTR_INDEX); \
+ write_mair_el##_el(mair); \
+ \
+ /* Invalidate TLBs at the current exception level */ \
+ _tlbi_fct(); \
+ \
+ /* Set TCR bits as well. */ \
+ /* Inner & outer WBWA & shareable + T0SZ = 32 */ \
+ tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | \
+ TCR_RGN_INNER_WBA | TCR_T0SZ_4GB; \
+ tcr |= _tcr_extra; \
+ write_tcr_el##_el(tcr); \
+ \
+ /* Set TTBR bits as well */ \
+ ttbr = (uint64_t) l1_xlation_table; \
+ write_ttbr0_el##_el(ttbr); \
+ \
+ /* Ensure all translation table writes have drained */ \
+ /* into memory, the TLB invalidation is complete, */ \
+ /* and translation register writes are committed */ \
+ /* before enabling the MMU */ \
+ dsb(); \
+ isb(); \
+ \
+ sctlr = read_sctlr_el##_el(); \
+ sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT; \
+ sctlr |= SCTLR_A_BIT | SCTLR_C_BIT; \
+ write_sctlr_el##_el(sctlr); \
+ \
+ /* Ensure the MMU enable takes effect immediately */ \
+ isb(); \
}
- /* ensure the MMU enable takes effect immediately */
- isb();
- return;
-}
+/* Define EL1 and EL3 variants of the function enabling the MMU */
+DEFINE_ENABLE_MMU_EL(1, 0, tlbivmalle1)
+DEFINE_ENABLE_MMU_EL(3, TCR_EL3_RES1, tlbialle3)
/*
* Table of regions to map using the MMU.
- * This doesn't include TZRAM as the 'mem_layout' argument passed to to
- * configure_mmu() will give the available subset of that,
+ * This doesn't include TZRAM as the 'mem_layout' argument passed to
+ * configure_mmu_elx() will give the available subset of that.
*/
const mmap_region_t fvp_mmap[] = {
{ TZROM_BASE, TZROM_SIZE, MT_MEMORY | MT_RO | MT_SECURE },
@@ -139,28 +126,32 @@
};
/*******************************************************************************
- * Setup the pagetables as per the platform memory map & initialize the mmu
- *******************************************************************************/
-void configure_mmu(meminfo_t *mem_layout,
- unsigned long ro_start,
- unsigned long ro_limit,
- unsigned long coh_start,
- unsigned long coh_limit)
-{
- mmap_add_region(mem_layout->total_base, mem_layout->total_size,
- MT_MEMORY | MT_RW | MT_SECURE);
- mmap_add_region(ro_start, ro_limit - ro_start,
- MT_MEMORY | MT_RO | MT_SECURE);
- mmap_add_region(coh_start, coh_limit - coh_start,
- MT_DEVICE | MT_RW | MT_SECURE);
-
- mmap_add(fvp_mmap);
-
- init_xlat_tables();
+ * Macro generating the code for the function setting up the pagetables as per
+ * the platform memory map & initialising the MMU, for the given exception level
+ ******************************************************************************/
+#define DEFINE_CONFIGURE_MMU_EL(_el) \
+ void configure_mmu_el##_el(meminfo_t *mem_layout, \
+ unsigned long ro_start, \
+ unsigned long ro_limit, \
+ unsigned long coh_start, \
+ unsigned long coh_limit) \
+ { \
+ mmap_add_region(mem_layout->total_base, \
+ mem_layout->total_size, \
+ MT_MEMORY | MT_RW | MT_SECURE); \
+ mmap_add_region(ro_start, ro_limit - ro_start, \
+ MT_MEMORY | MT_RO | MT_SECURE); \
+ mmap_add_region(coh_start, coh_limit - coh_start, \
+ MT_DEVICE | MT_RW | MT_SECURE); \
+ mmap_add(fvp_mmap); \
+ init_xlat_tables(); \
+ \
+ enable_mmu_el##_el(); \
+ }
- enable_mmu();
- return;
-}
+/* Define EL1 and EL3 variants of the function initialising the MMU */
+DEFINE_CONFIGURE_MMU_EL(1)
+DEFINE_CONFIGURE_MMU_EL(3)
/* Simple routine which returns a configuration variable value */
unsigned long platform_get_cfgvar(unsigned int var_id)
diff --git a/plat/fvp/bl1_plat_setup.c b/plat/fvp/bl1_plat_setup.c
index fd03ec2..edd3f7b 100644
--- a/plat/fvp/bl1_plat_setup.c
+++ b/plat/fvp/bl1_plat_setup.c
@@ -138,9 +138,9 @@
cci_enable_coherency(read_mpidr());
}
- configure_mmu(&bl1_tzram_layout,
- TZROM_BASE,
- TZROM_BASE + TZROM_SIZE,
- BL1_COHERENT_RAM_BASE,
- BL1_COHERENT_RAM_LIMIT);
+ configure_mmu_el3(&bl1_tzram_layout,
+ TZROM_BASE,
+ TZROM_BASE + TZROM_SIZE,
+ BL1_COHERENT_RAM_BASE,
+ BL1_COHERENT_RAM_LIMIT);
}
diff --git a/plat/fvp/bl2_plat_setup.c b/plat/fvp/bl2_plat_setup.c
index 4c649eb..80bb52e 100644
--- a/plat/fvp/bl2_plat_setup.c
+++ b/plat/fvp/bl2_plat_setup.c
@@ -172,9 +172,9 @@
******************************************************************************/
void bl2_plat_arch_setup()
{
- configure_mmu(&bl2_tzram_layout,
- BL2_RO_BASE,
- BL2_RO_LIMIT,
- BL2_COHERENT_RAM_BASE,
- BL2_COHERENT_RAM_LIMIT);
+ configure_mmu_el1(&bl2_tzram_layout,
+ BL2_RO_BASE,
+ BL2_RO_LIMIT,
+ BL2_COHERENT_RAM_BASE,
+ BL2_COHERENT_RAM_LIMIT);
}
diff --git a/plat/fvp/bl31_plat_setup.c b/plat/fvp/bl31_plat_setup.c
index 5c00baa..baf7df1 100644
--- a/plat/fvp/bl31_plat_setup.c
+++ b/plat/fvp/bl31_plat_setup.c
@@ -172,9 +172,9 @@
******************************************************************************/
void bl31_plat_arch_setup()
{
- configure_mmu(&bl2_to_bl31_args->bl31_meminfo,
- BL31_RO_BASE,
- BL31_RO_LIMIT,
- BL31_COHERENT_RAM_BASE,
- BL31_COHERENT_RAM_LIMIT);
+ configure_mmu_el3(&bl2_to_bl31_args->bl31_meminfo,
+ BL31_RO_BASE,
+ BL31_RO_LIMIT,
+ BL31_COHERENT_RAM_BASE,
+ BL31_COHERENT_RAM_LIMIT);
}
diff --git a/plat/fvp/bl32_plat_setup.c b/plat/fvp/bl32_plat_setup.c
index 9fe8fe1..bb2b602 100644
--- a/plat/fvp/bl32_plat_setup.c
+++ b/plat/fvp/bl32_plat_setup.c
@@ -111,9 +111,9 @@
******************************************************************************/
void bl32_plat_arch_setup()
{
- configure_mmu(&bl32_tzdram_layout,
- BL32_RO_BASE,
- BL32_RO_LIMIT,
- BL32_COHERENT_RAM_BASE,
- BL32_COHERENT_RAM_LIMIT);
+ configure_mmu_el1(&bl32_tzdram_layout,
+ BL32_RO_BASE,
+ BL32_RO_LIMIT,
+ BL32_COHERENT_RAM_BASE,
+ BL32_COHERENT_RAM_LIMIT);
}
diff --git a/plat/fvp/plat_security.c b/plat/fvp/plat_security.c
index 32306cd..c39907a 100644
--- a/plat/fvp/plat_security.c
+++ b/plat/fvp/plat_security.c
@@ -88,36 +88,33 @@
tzc_disable_filters(&controller);
/*
- * Allow full access to all DRAM to supported devices for the
- * moment. Give access to the CPUs and Virtio. Some devices
+ * Allow only non-secure access to all DRAM for the supported devices.
+ * Give access to the CPUs and Virtio. Some devices
* would normally use the default ID so allow that too. We use
- * three different regions to cover the three separate blocks of
- * memory in the FVPs. We allow secure access to DRAM to load NS
- * software.
- * FIXME: In current models Virtio uses a reserved ID. This is
- * not correct and will be fixed.
+ * two regions to cover the blocks of physical memory in the FVPs.
+ *
+ * Software executing in the secure state, such as a secure
+ * boot-loader, can access the DRAM by using the NS attributes in
+ * the MMU translation tables and descriptors.
*/
- /* Set to cover 2GB block of DRAM */
+ /* Set to cover the first block of DRAM */
tzc_configure_region(&controller, FILTER_SHIFT(0), 1,
- DRAM_BASE, 0xFFFFFFFF, TZC_REGION_S_RDWR,
- TZC_REGION_ACCESS_RDWR(FVP_NSAID_AP) |
+ DRAM_BASE, 0xFFFFFFFF, TZC_REGION_S_NONE,
TZC_REGION_ACCESS_RDWR(FVP_NSAID_DEFAULT) |
- TZC_REGION_ACCESS_RDWR(FVP_NSAID_RES5));
+ TZC_REGION_ACCESS_RDWR(FVP_NSAID_PCI) |
+ TZC_REGION_ACCESS_RDWR(FVP_NSAID_AP) |
+ TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO) |
+ TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO_OLD));
- /* Set to cover the 30GB block */
+ /* Set to cover the second block of DRAM */
tzc_configure_region(&controller, FILTER_SHIFT(0), 2,
- 0x880000000, 0xFFFFFFFFF, TZC_REGION_S_RDWR,
- TZC_REGION_ACCESS_RDWR(FVP_NSAID_AP) |
+ 0x880000000, 0xFFFFFFFFF, TZC_REGION_S_NONE,
TZC_REGION_ACCESS_RDWR(FVP_NSAID_DEFAULT) |
- TZC_REGION_ACCESS_RDWR(FVP_NSAID_RES5));
-
- /* Set to cover 480GB block */
- tzc_configure_region(&controller, FILTER_SHIFT(0), 3,
- 0x8800000000, 0xFFFFFFFFFF, TZC_REGION_S_RDWR,
+ TZC_REGION_ACCESS_RDWR(FVP_NSAID_PCI) |
TZC_REGION_ACCESS_RDWR(FVP_NSAID_AP) |
- TZC_REGION_ACCESS_RDWR(FVP_NSAID_DEFAULT) |
- TZC_REGION_ACCESS_RDWR(FVP_NSAID_RES5));
+ TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO) |
+ TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO_OLD));
/*
* TODO: Interrupts are not currently supported. The only
diff --git a/plat/fvp/platform.h b/plat/fvp/platform.h
index 3fe892e..40f780e 100644
--- a/plat/fvp/platform.h
+++ b/plat/fvp/platform.h
@@ -307,18 +307,21 @@
/*
* The NSAIDs for this platform as used to program the TZC400.
- * TODO:
- * This list and the numbers in it is still changing on the Base FVP.
- * For now only specify the NSAIDs we actually use.
*/
/* The FVP has 4 bits of NSAIDs. Used with TZC FAIL_ID (ACE Lite ID width) */
#define FVP_AID_WIDTH 4
+
+/* NSAIDs used by devices in TZC filter 0 on FVP */
#define FVP_NSAID_DEFAULT 0
+#define FVP_NSAID_PCI 1
+#define FVP_NSAID_VIRTIO 8 /* from FVP v5.6 onwards */
#define FVP_NSAID_AP 9 /* Application Processors */
+#define FVP_NSAID_VIRTIO_OLD 15 /* until FVP v5.5 */
-/* FIXME: Currently incorrectly used by Virtio */
-#define FVP_NSAID_RES5 15
+/* NSAIDs used by devices in TZC filter 2 on FVP */
+#define FVP_NSAID_HDLCD0 2
+#define FVP_NSAID_CLCD 7
/*******************************************************************************
@@ -370,12 +373,18 @@
extern void bl31_plat_arch_setup(void);
extern int platform_setup_pm(const struct plat_pm_ops **);
extern unsigned int platform_get_core_pos(unsigned long mpidr);
-extern void enable_mmu(void);
-extern void configure_mmu(struct meminfo *,
- unsigned long,
- unsigned long,
- unsigned long,
- unsigned long);
+extern void enable_mmu_el1(void);
+extern void enable_mmu_el3(void);
+extern void configure_mmu_el1(struct meminfo *mem_layout,
+ unsigned long ro_start,
+ unsigned long ro_limit,
+ unsigned long coh_start,
+ unsigned long coh_limit);
+extern void configure_mmu_el3(struct meminfo *mem_layout,
+ unsigned long ro_start,
+ unsigned long ro_limit,
+ unsigned long coh_start,
+ unsigned long coh_limit);
extern unsigned long platform_get_cfgvar(unsigned int);
extern int platform_config_setup(void);
extern void plat_report_exception(unsigned long);
diff --git a/services/std_svc/psci/psci_afflvl_on.c b/services/std_svc/psci/psci_afflvl_on.c
index 8f9bb4d..9f4ebf6 100644
--- a/services/std_svc/psci/psci_afflvl_on.c
+++ b/services/std_svc/psci/psci_afflvl_on.c
@@ -362,7 +362,7 @@
/*
* Arch. management: Turn on mmu & restore architectural state
*/
- enable_mmu();
+ enable_mmu_el3();
/*
* All the platform specific actions for turning this cpu