Merge pull request #1717 from satheesbalya-arm/sb1/sb1_2629_romlib_ifc
romlib: Add platform specific jump table list
diff --git a/Makefile b/Makefile
index 5b6bd71..5c61f8d 100644
--- a/Makefile
+++ b/Makefile
@@ -154,7 +154,7 @@
march32-directive = -march=armv8-a
endif
-ifeq ($(notdir $(CC)),armclang)
+ifneq ($(findstring armclang,$(notdir $(CC))),)
TF_CFLAGS_aarch32 = -target arm-arm-none-eabi $(march32-directive)
TF_CFLAGS_aarch64 = -target aarch64-arm-none-eabi -march=armv8-a
LD = $(LINKER)
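
The armclang check above changes from an exact basename comparison to a substring test: $(findstring armclang,$(notdir $(CC))) expands to a non-empty string whenever "armclang" occurs anywhere in the compiler's basename, so suffixed or versioned names (for example a hypothetical armclang.exe) are still detected. As a purely illustrative C analogy, not part of the patch, the old test behaved like strcmp and the new one like strstr:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *cc = "armclang.exe";	/* hypothetical $(notdir $(CC)) */

	/* Old behaviour: exact match only, so "armclang.exe" is rejected. */
	printf("exact:     %s\n", (strcmp(cc, "armclang") == 0) ? "match" : "no match");

	/* New behaviour: substring match, so any name containing "armclang" passes. */
	printf("substring: %s\n", (strstr(cc, "armclang") != NULL) ? "match" : "no match");
	return 0;
}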
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
index 7d1944c..a6fe14f 100644
--- a/include/lib/aarch32/arch_helpers.h
+++ b/include/lib/aarch32/arch_helpers.h
@@ -248,6 +248,19 @@
DEFINE_COPROCR_RW_FUNCS(cnthp_tval_el2, CNTHP_TVAL)
DEFINE_COPROCR_RW_FUNCS_64(cnthp_cval_el2, CNTHP_CVAL_64)
+#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
+ CNTP_CTL_ENABLE_MASK)
+#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
+ CNTP_CTL_IMASK_MASK)
+#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
+ CNTP_CTL_ISTATUS_MASK)
+
+#define set_cntp_ctl_enable(x)  ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define set_cntp_ctl_imask(x)   ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
+
+#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
+
DEFINE_COPROCR_RW_FUNCS(icc_sre_el1, ICC_SRE)
DEFINE_COPROCR_RW_FUNCS(icc_sre_el2, ICC_HSRE)
DEFINE_COPROCR_RW_FUNCS(icc_sre_el3, ICC_MSRE)
@@ -378,4 +391,59 @@
#define read_amcntenset0_el0() read_amcntenset0()
#define read_amcntenset1_el0() read_amcntenset1()
+/* Helper functions to manipulate CPSR */
+static inline void enable_irq(void)
+{
+	/*
+	 * The compiler memory barrier prevents the compiler from
+	 * scheduling non-volatile memory accesses after the CPSIE
+	 * instruction that unmasks the interrupt.
+	 *
+	 * This could happen if some initialization code issues
+	 * non-volatile accesses to an area used by an interrupt
+	 * handler, on the assumption that this is safe because
+	 * interrupts are disabled at that point (according to program
+	 * order). However, non-volatile accesses are not necessarily
+	 * ordered relative to volatile inline assembly statements (or
+	 * to volatile accesses).
+	 */
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsie i");
+ isb();
+}
+
+static inline void enable_serror(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsie a");
+ isb();
+}
+
+static inline void enable_fiq(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsie f");
+ isb();
+}
+
+static inline void disable_irq(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsid i");
+ isb();
+}
+
+static inline void disable_serror(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsid a");
+ isb();
+}
+
+static inline void disable_fiq(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsid f");
+ isb();
+}
+
#endif /* ARCH_HELPERS_H */
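
A minimal sketch of the hazard the comment in enable_irq() describes (the names shared_state and init_and_unmask are hypothetical, for illustration only): without the compiler barrier, the plain store could legally be moved past the instruction that unmasks interrupts.

/* Hypothetical: shared_state is also read by an interrupt handler. */
static int shared_state;	/* deliberately non-volatile */

void init_and_unmask(void)
{
	/*
	 * Interrupts are masked here, so this plain store looks safe
	 * in program order...
	 */
	shared_state = 42;

	/*
	 * ...but without the COMPILER_BARRIER() inside enable_irq(),
	 * the compiler could sink the store below the inline asm that
	 * unmasks IRQs, letting the handler observe stale data.
	 */
	enable_irq();
}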
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index 97595e9..6f81e1b 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -534,19 +534,6 @@
#define CNTP_CTL_IMASK_MASK U(1)
#define CNTP_CTL_ISTATUS_MASK U(1)
-#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
- CNTP_CTL_ENABLE_MASK)
-#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
- CNTP_CTL_IMASK_MASK)
-#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
- CNTP_CTL_ISTATUS_MASK)
-
-#define set_cntp_ctl_enable(x) ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
-#define set_cntp_ctl_imask(x) ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
-
-#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
-#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
-
/* Exception Syndrome register bits and bobs */
#define ESR_EC_SHIFT U(26)
#define ESR_EC_MASK U(0x3f)
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 8b3d53a..7222b9d 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -215,11 +215,81 @@
DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
DEFINE_SYSOP_FUNC(isb)
+static inline void enable_irq(void)
+{
+	/*
+	 * The compiler memory barrier prevents the compiler from
+	 * scheduling non-volatile memory accesses after the write to
+	 * the DAIF clear register that unmasks the interrupt.
+	 *
+	 * This could happen if some initialization code issues
+	 * non-volatile accesses to an area used by an interrupt
+	 * handler, on the assumption that this is safe because
+	 * interrupts are disabled at that point (according to program
+	 * order). However, non-volatile accesses are not necessarily
+	 * ordered relative to volatile inline assembly statements (or
+	 * to volatile accesses).
+	 */
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_IRQ_BIT);
+ isb();
+}
+
+static inline void enable_fiq(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_FIQ_BIT);
+ isb();
+}
+
+static inline void enable_serror(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_ABT_BIT);
+ isb();
+}
+
+static inline void enable_debug_exceptions(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_DBG_BIT);
+ isb();
+}
+
+static inline void disable_irq(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_IRQ_BIT);
+ isb();
+}
+
+static inline void disable_fiq(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_FIQ_BIT);
+ isb();
+}
+
+static inline void disable_serror(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_ABT_BIT);
+ isb();
+}
+
+static inline void disable_debug_exceptions(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_DBG_BIT);
+ isb();
+}
+
+#if !ERROR_DEPRECATED
uint32_t get_afflvl_shift(uint32_t);
uint32_t mpidr_mask_lower_afflvls(uint64_t, uint32_t);
void __dead2 eret(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
+#endif
void __dead2 smc(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
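
These helpers pair naturally into short critical sections. A minimal sketch using only the functions added above (the function name and the protected data are hypothetical):

void update_shared_data(void)
{
	disable_irq();		/* sets DAIF.I via daifset, then ISB */

	/* ... modify data shared with the IRQ handler ... */

	enable_irq();		/* clears DAIF.I via daifclr, then ISB */
}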
@@ -306,6 +376,19 @@
DEFINE_SYSREG_READ_FUNC(cntpct_el0)
DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
+#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
+ CNTP_CTL_ENABLE_MASK)
+#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
+ CNTP_CTL_IMASK_MASK)
+#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
+ CNTP_CTL_ISTATUS_MASK)
+
+#define set_cntp_ctl_enable(x) ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define set_cntp_ctl_imask(x) ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
+
+#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
+
DEFINE_SYSREG_RW_FUNCS(tpidr_el3)
DEFINE_SYSREG_RW_FUNCS(cntvoff_el2)
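
The CNTP_CTL accessor macros now live beside the system-register accessors they are used with. A sketch of typical use with the EL1 physical timer, assuming the usual TF-A accessors generated by DEFINE_SYSREG_RW_FUNCS(cntp_ctl_el0) (the AArch32 copy works the same way through its coprocessor accessors):

void start_el1_physical_timer(void)
{
	uint32_t ctl = (uint32_t)read_cntp_ctl_el0();

	set_cntp_ctl_enable(ctl);	/* ENABLE = 1: start the timer */
	clr_cntp_ctl_imask(ctl);	/* IMASK = 0: let the interrupt fire */
	write_cntp_ctl_el0(ctl);

	/* get_cntp_ctl_istatus(read_cntp_ctl_el0()) polls the raw status. */
}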
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index 002942e..8920f72 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -9,9 +9,11 @@
#include <assert_macros.S>
#include <xlat_tables_defs.h>
+#if !ERROR_DEPRECATED
.globl get_afflvl_shift
.globl mpidr_mask_lower_afflvls
.globl eret
+#endif /* ERROR_DEPRECATED */
.globl smc
.globl zero_normalmem
@@ -30,6 +32,7 @@
.globl enable_vfp
#endif
+#if !ERROR_DEPRECATED
func get_afflvl_shift
cmp x0, #3
cinc x0, x0, eq
@@ -52,7 +55,7 @@
func eret
eret
endfunc eret
-
+#endif /* ERROR_DEPRECATED */
func smc
smc #0
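
With this change the deprecated AArch64 helpers disappear from both the header prototypes and the assembly when a build sets ERROR_DEPRECATED=1, so leftover callers fail at build time instead of silently linking. A hedged sketch of the guard pattern from a hypothetical caller's side:

#include <stdint.h>
#include <arch_helpers.h>

#if !ERROR_DEPRECATED
/* Only compiled while deprecated interfaces are still kept. */
static uint32_t legacy_affinity_shift(uint32_t lvl)
{
	return get_afflvl_shift(lvl);
}
#endif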
diff --git a/plat/st/stm32mp1/platform.mk b/plat/st/stm32mp1/platform.mk
index 545b140..8890e82 100644
--- a/plat/st/stm32mp1/platform.mk
+++ b/plat/st/stm32mp1/platform.mk
@@ -83,10 +83,6 @@
plat/st/stm32mp1/plat_bl2_mem_params_desc.c \
plat/st/stm32mp1/plat_image_load.c
-# For memory footprint optimization, build with thumb and interwork support
-ASFLAGS += -mthumb -mthumb-interwork
-TF_CFLAGS += -mthumb -mthumb-interwork
-
# Macros and rules to build TF binary
STM32_TF_ELF_LDFLAGS := --hash-style=gnu --as-needed
STM32_DT_BASENAME := $(STM32_DTB_FILE_NAME:.dtb=)