Merge pull request #1287 from davidcunado-arm/dc/fix_misra
Update ULL() macro and instances of ull to comply with MISRA
diff --git a/bl1/bl1.ld.S b/bl1/bl1.ld.S
index e4c454b..26c0ae4 100644
--- a/bl1/bl1.ld.S
+++ b/bl1/bl1.ld.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -129,7 +129,8 @@
/*
* The xlat_table section is for full, aligned page tables (4K).
* Removing them from .bss avoids forcing 4K alignment on
- * the .bss section and eliminates the unecessary zero init
+ * the .bss section. The tables are initialized to zero by the translation
+ * tables library.
*/
xlat_table (NOLOAD) : {
*(xlat_table)
diff --git a/bl2/bl2.ld.S b/bl2/bl2.ld.S
index 4fe78f9..69c22eb 100644
--- a/bl2/bl2.ld.S
+++ b/bl2/bl2.ld.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -108,7 +108,8 @@
/*
* The xlat_table section is for full, aligned page tables (4K).
* Removing them from .bss avoids forcing 4K alignment on
- * the .bss section and eliminates the unecessary zero init
+ * the .bss section. The tables are initialized to zero by the translation
+ * tables library.
*/
xlat_table (NOLOAD) : {
*(xlat_table)
diff --git a/bl2/bl2_el3.ld.S b/bl2/bl2_el3.ld.S
index 57709e3..3728643 100644
--- a/bl2/bl2_el3.ld.S
+++ b/bl2/bl2_el3.ld.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -139,7 +139,8 @@
/*
* The xlat_table section is for full, aligned page tables (4K).
* Removing them from .bss avoids forcing 4K alignment on
- * the .bss section and eliminates the unnecessary zero init
+ * the .bss section. The tables are initialized to zero by the translation
+ * tables library.
*/
xlat_table (NOLOAD) : {
*(xlat_table)
diff --git a/bl2u/bl2u.ld.S b/bl2u/bl2u.ld.S
index da58717..7b97758 100644
--- a/bl2u/bl2u.ld.S
+++ b/bl2u/bl2u.ld.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -95,7 +95,8 @@
/*
* The xlat_table section is for full, aligned page tables (4K).
* Removing them from .bss avoids forcing 4K alignment on
- * the .bss section and eliminates the unecessary zero init
+ * the .bss section. The tables are initialized to zero by the translation
+ * tables library.
*/
xlat_table (NOLOAD) : {
*(xlat_table)
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 924f295..0d1077c 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -9,7 +9,7 @@
#include <el3_common_macros.S>
#include <pmf_asm_macros.S>
#include <runtime_instr.h>
-#include <xlat_tables_defs.h>
+#include <xlat_mmu_helpers.h>
.globl bl31_entrypoint
.globl bl31_warm_entrypoint
diff --git a/bl31/bl31.ld.S b/bl31/bl31.ld.S
index dd046c4..c6a4fe4 100644
--- a/bl31/bl31.ld.S
+++ b/bl31/bl31.ld.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -217,7 +217,8 @@
/*
* The xlat_table section is for full, aligned page tables (4K).
* Removing them from .bss avoids forcing 4K alignment on
- * the .bss section and eliminates the unecessary zero init
+ * the .bss section. The tables are initialized to zero by the translation
+ * tables library.
*/
xlat_table (NOLOAD) : {
#if ENABLE_SPM
diff --git a/bl31/ehf.c b/bl31/ehf.c
index 39ee635..8673564 100644
--- a/bl31/ehf.c
+++ b/bl31/ehf.c
@@ -9,6 +9,8 @@
*/
#include <assert.h>
+#include <context.h>
+#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <ehf.h>
@@ -308,15 +310,17 @@
/*
* Program Priority Mask to the original Non-secure priority such that
* Non-secure interrupts may preempt Secure execution, viz. during Yielding SMC
- * calls.
+ * calls. The 'preempt_ret_code' parameter indicates the Yielding SMC's return
+ * value in case the call was preempted.
*
* This API is expected to be invoked before delegating a yielding SMC to Secure
* EL1. I.e. within the window of secure execution after Non-secure context is
* saved (after entry into EL3) and Secure context is restored (before entering
* Secure EL1).
*/
-void ehf_allow_ns_preemption(void)
+void ehf_allow_ns_preemption(uint64_t preempt_ret_code)
{
+ cpu_context_t *ns_ctx;
unsigned int old_pmr __unused;
pe_exc_data_t *pe_data = this_cpu_data();
@@ -333,6 +337,15 @@
panic();
}
+ /*
+ * Program preempted return code to x0 right away so that, if the
+ * Yielding SMC was indeed preempted before a dispatcher gets a chance
+ * to populate it, the caller would find the correct return value.
+ */
+ ns_ctx = cm_get_context(NON_SECURE);
+ assert(ns_ctx);
+ write_ctx_reg(get_gpregs_ctx(ns_ctx), CTX_GPREG_X0, preempt_ret_code);
+
old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask);
EHF_LOG("Priority Mask: 0x%x => 0x%x\n", old_pmr, pe_data->ns_pri_mask);
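A minimal usage sketch of the new parameter, assuming a hypothetical dispatcher (the real in-tree callers are the tspd_main.c hunks below); the context-management flow mirrors the usual TF-A yielding-SMC handler:

    /* Sketch: delegate a yielding SMC to S-EL1. TSP_PREEMPTED is the value
     * the Non-secure caller should observe in x0 if a Non-secure interrupt
     * preempts the call before the dispatcher populates a result. */
    static uintptr_t spd_yielding_smc_sketch(void)
    {
            /* NS GP regs were saved into the NS context on SMC entry;
             * ehf_allow_ns_preemption() overwrites x0 in that saved copy. */
            cm_el1_sysregs_context_save(NON_SECURE);
            ehf_allow_ns_preemption(TSP_PREEMPTED);

            /* Enter the secure world to process the SMC. */
            cm_el1_sysregs_context_restore(SECURE);
            cm_set_next_eret_context(SECURE);
            return (uintptr_t)cm_get_context(SECURE);
    }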
diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S
index e798a0d..71de883 100644
--- a/bl32/sp_min/sp_min.ld.S
+++ b/bl32/sp_min/sp_min.ld.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -176,7 +176,8 @@
/*
* The xlat_table section is for full, aligned page tables (4K).
* Removing them from .bss avoids forcing 4K alignment on
- * the .bss section and eliminates the unecessary zero init
+ * the .bss section. The tables are initialized to zero by the translation
+ * tables library.
*/
xlat_table (NOLOAD) : {
*(xlat_table)
diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S
index d256b46..31c5a67 100644
--- a/bl32/tsp/tsp.ld.S
+++ b/bl32/tsp/tsp.ld.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -94,7 +94,8 @@
/*
* The xlat_table section is for full, aligned page tables (4K).
* Removing them from .bss avoids forcing 4K alignment on
- * the .bss section and eliminates the unecessary zero init
+ * the .bss section. The tables are initialized to zero by the translation
+ * tables library.
*/
xlat_table (NOLOAD) : {
*(xlat_table)
diff --git a/docs/porting-guide.rst b/docs/porting-guide.rst
index f21baf5..21db86b 100644
--- a/docs/porting-guide.rst
+++ b/docs/porting-guide.rst
@@ -2022,9 +2022,9 @@
Critical SDEI events on the platform. This must have a lower value (therefore of
higher priority) than ``PLAT_SDEI_NORMAL_PRI``.
-It's recommended that SDEI exception priorities in general are assigned the
-lowest among Secure priorities. Among the SDEI exceptions, Critical SDEI
-priority must be higher than Normal SDEI priority.
+**Note**: SDEI exception priorities must be the lowest among Secure priorities.
+Among the SDEI exceptions, Critical SDEI priority must be higher than Normal
+SDEI priority.
Functions
.........
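As a concrete illustration of the note above, a platform might define (values hypothetical, not mandated; on GIC, a lower value means a higher priority):

    /* Hypothetical platform_def.h: Critical SDEI numerically lower (higher
     * priority) than Normal SDEI; both at the low end of Secure priorities. */
    #define PLAT_SDEI_CRITICAL_PRI          0x60
    #define PLAT_SDEI_NORMAL_PRI            0x70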
diff --git a/include/bl31/ehf.h b/include/bl31/ehf.h
index be8c957..f963f8d 100644
--- a/include/bl31/ehf.h
+++ b/include/bl31/ehf.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -82,7 +82,7 @@
void ehf_activate_priority(unsigned int priority);
void ehf_deactivate_priority(unsigned int priority);
void ehf_register_priority_handler(unsigned int pri, ehf_handler_t handler);
-void ehf_allow_ns_preemption(void);
+void ehf_allow_ns_preemption(uint64_t preempt_ret_code);
unsigned int ehf_is_ns_preemption_allowed(void);
#endif /* __ASSEMBLY__ */
diff --git a/include/lib/cpus/aarch64/cortex_a75.h b/include/lib/cpus/aarch64/cortex_a75.h
index 940125d..20f0251 100644
--- a/include/lib/cpus/aarch64/cortex_a75.h
+++ b/include/lib/cpus/aarch64/cortex_a75.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -19,28 +19,6 @@
/* Definitions of register field mask in CORTEX_A75_CPUPWRCTLR_EL1 */
#define CORTEX_A75_CORE_PWRDN_EN_MASK 0x1
-/*******************************************************************************
- * CPU Activity Monitor Unit register specific definitions.
- ******************************************************************************/
-#define CPUAMCNTENCLR_EL0 S3_3_C15_C9_7
-#define CPUAMCNTENSET_EL0 S3_3_C15_C9_6
-#define CPUAMCFGR_EL0 S3_3_C15_C10_6
-#define CPUAMUSERENR_EL0 S3_3_C15_C10_7
-
-/* Activity Monitor Event Counter Registers */
-#define CPUAMEVCNTR0_EL0 S3_3_C15_C9_0
-#define CPUAMEVCNTR1_EL0 S3_3_C15_C9_1
-#define CPUAMEVCNTR2_EL0 S3_3_C15_C9_2
-#define CPUAMEVCNTR3_EL0 S3_3_C15_C9_3
-#define CPUAMEVCNTR4_EL0 S3_3_C15_C9_4
-
-/* Activity Monitor Event Type Registers */
-#define CPUAMEVTYPER0_EL0 S3_3_C15_C10_0
-#define CPUAMEVTYPER1_EL0 S3_3_C15_C10_1
-#define CPUAMEVTYPER2_EL0 S3_3_C15_C10_2
-#define CPUAMEVTYPER3_EL0 S3_3_C15_C10_3
-#define CPUAMEVTYPER4_EL0 S3_3_C15_C10_4
-
#define CORTEX_A75_ACTLR_AMEN_BIT (U(1) << 4)
/*
@@ -50,9 +28,9 @@
* CPUAMEVTYPER<n> register and are disabled by default. Platforms may
* enable this with suitable programming.
*/
-#define CORTEX_A75_AMU_NR_COUNTERS 5
-#define CORTEX_A75_AMU_GROUP0_MASK 0x7
-#define CORTEX_A75_AMU_GROUP1_MASK (0 << 3)
+#define CORTEX_A75_AMU_NR_COUNTERS U(5)
+#define CORTEX_A75_AMU_GROUP0_MASK U(0x7)
+#define CORTEX_A75_AMU_GROUP1_MASK (U(0) << 3)
#ifndef __ASSEMBLY__
#include <stdint.h>
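The U() wrapper introduced above (like the ULL() macro named in the merge subject) comes from utils_def.h; the pattern, sketched here in simplified form, appends the unsigned suffix only when compiling C, since the assembler rejects literal suffixes in shared headers:

    /* Simplified sketch of the utils_def.h idiom, not a verbatim copy. */
    #ifdef __ASSEMBLY__
    # define U(_x)          (_x)
    # define ULL(_x)        (_x)
    #else
    # define U(_x)          (_x##U)
    # define ULL(_x)        (_x##ULL)
    #endif

    #define CORTEX_A75_AMU_GROUP0_MASK      U(0x7)  /* 0x7U in C, 0x7 in .S */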
diff --git a/include/lib/cpus/aarch64/cpuamu.h b/include/lib/cpus/aarch64/cpuamu.h
new file mode 100644
index 0000000..960a524
--- /dev/null
+++ b/include/lib/cpus/aarch64/cpuamu.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CPUAMU_H__
+#define __CPUAMU_H__
+
+/*******************************************************************************
+ * CPU Activity Monitor Unit register specific definitions.
+ ******************************************************************************/
+#define CPUAMCNTENCLR_EL0 S3_3_C15_C9_7
+#define CPUAMCNTENSET_EL0 S3_3_C15_C9_6
+#define CPUAMCFGR_EL0 S3_3_C15_C10_6
+#define CPUAMUSERENR_EL0 S3_3_C15_C10_7
+
+/* Activity Monitor Event Counter Registers */
+#define CPUAMEVCNTR0_EL0 S3_3_C15_C9_0
+#define CPUAMEVCNTR1_EL0 S3_3_C15_C9_1
+#define CPUAMEVCNTR2_EL0 S3_3_C15_C9_2
+#define CPUAMEVCNTR3_EL0 S3_3_C15_C9_3
+#define CPUAMEVCNTR4_EL0 S3_3_C15_C9_4
+
+/* Activity Monitor Event Type Registers */
+#define CPUAMEVTYPER0_EL0 S3_3_C15_C10_0
+#define CPUAMEVTYPER1_EL0 S3_3_C15_C10_1
+#define CPUAMEVTYPER2_EL0 S3_3_C15_C10_2
+#define CPUAMEVTYPER3_EL0 S3_3_C15_C10_3
+#define CPUAMEVTYPER4_EL0 S3_3_C15_C10_4
+
+#ifndef __ASSEMBLY__
+#include <stdint.h>
+
+uint64_t cpuamu_cnt_read(int idx);
+void cpuamu_cnt_write(int idx, uint64_t val);
+unsigned int cpuamu_read_cpuamcntenset_el0(void);
+unsigned int cpuamu_read_cpuamcntenclr_el0(void);
+void cpuamu_write_cpuamcntenset_el0(unsigned int mask);
+void cpuamu_write_cpuamcntenclr_el0(unsigned int mask);
+
+int midr_match(unsigned int cpu_midr);
+void cpuamu_context_save(unsigned int nr_counters);
+void cpuamu_context_restore(unsigned int nr_counters);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __CPUAMU_H__ */
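A short usage sketch for the generic interface (the helper below is hypothetical; the real consumers are the pubsub hooks in cortex_a75_pubsub.c further down):

    #include <cpuamu.h>

    /* Hypothetical diagnostic: snapshot counter 0 and leave it disabled. */
    static uint64_t cpuamu_snapshot_cnt0(void)
    {
            uint64_t cnt = cpuamu_cnt_read(0);      /* CPUAMEVCNTR0_EL0 */

            /* Clear enable bit 0 via CPUAMCNTENCLR_EL0. */
            cpuamu_write_cpuamcntenclr_el0(1U << 0);
            return cnt;
    }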
diff --git a/include/lib/extensions/spe.h b/include/lib/extensions/spe.h
index 8a74127..b2b188e 100644
--- a/include/lib/extensions/spe.h
+++ b/include/lib/extensions/spe.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,6 +7,7 @@
#ifndef __SPE_H__
#define __SPE_H__
+int spe_supported(void);
void spe_enable(int el2_unused);
void spe_disable(void);
diff --git a/include/lib/extensions/sve.h b/include/lib/extensions/sve.h
index 28923e3..9c7f37f 100644
--- a/include/lib/extensions/sve.h
+++ b/include/lib/extensions/sve.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,6 +7,7 @@
#ifndef __SVE_H__
#define __SVE_H__
+int sve_supported(void);
void sve_enable(int el2_unused);
#endif /* __SVE_H__ */
diff --git a/include/lib/xlat_tables/xlat_mmu_helpers.h b/include/lib/xlat_tables/xlat_mmu_helpers.h
index fd3efc3..d83d764 100644
--- a/include/lib/xlat_tables/xlat_mmu_helpers.h
+++ b/include/lib/xlat_tables/xlat_mmu_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,13 +7,51 @@
#ifndef __XLAT_MMU_HELPERS_H__
#define __XLAT_MMU_HELPERS_H__
+/*
+ * The following flags are passed to enable_mmu_xxx() to override the default
+ * values used to program system registers while enabling the MMU.
+ */
+
+/*
+ * When this flag is used, all data access to Normal memory from this EL and all
+ * Normal memory accesses to the translation tables of this EL are non-cacheable
+ * for all levels of data and unified cache until the caches are enabled by
+ * setting the bit SCTLR_ELx.C.
+ */
+#define DISABLE_DCACHE (U(1) << 0)
+
+/*
+ * Mark the translation tables as non-cacheable for the MMU table walker, which
+ * is a different observer from the PE/CPU. If the flag is not specified, the
+ * tables are cacheable for the MMU table walker.
+ *
+ * Note that, as far as the PE/CPU observer is concerned, the attributes used
+ * are the ones specified in the translation tables themselves. The MAIR
+ * register specifies the cacheability through the field AttrIndx of the lower
+ * attributes of the translation tables. The shareability is specified in the SH
+ * field of the lower attributes.
+ *
+ * The MMU table walker uses the attributes specified in the fields ORGNn, IRGNn
+ * and SHn of the TCR register to access the translation tables.
+ *
+ * The attributes specified in the TCR register and the tables can be different
+ * as there are no checks to prevent that. Special care must be taken to ensure
+ * that there aren't mismatches. The behaviour in that case is described in
+ * the 'Mismatched memory attributes' sections of the ARMv8 ARM.
+ */
+#define XLAT_TABLE_NC (U(1) << 1)
+
+#ifndef __ASSEMBLY__
+
#ifdef AARCH32
/* AArch32 specific translation table API */
-void enable_mmu_secure(uint32_t flags);
+void enable_mmu_secure(unsigned int flags);
#else
/* AArch64 specific translation table APIs */
void enable_mmu_el1(unsigned int flags);
void enable_mmu_el3(unsigned int flags);
#endif /* AARCH32 */
+#endif /* __ASSEMBLY__ */
+
#endif /* __XLAT_MMU_HELPERS_H__ */
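A sketch of a caller, assuming the image has already registered its mmap regions; DISABLE_DCACHE is the flag for the early-boot window where SCTLR_ELx.C is still clear:

    #include <xlat_mmu_helpers.h>
    #include <xlat_tables_v2.h>

    /* Sketch: BL31 enabling the MMU before the data cache is turned on. */
    void bl31_plat_enable_mmu_sketch(void)
    {
            init_xlat_tables();
            enable_mmu_el3(DISABLE_DCACHE);
    }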
diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h
index 3a7f245..1c84fe0 100644
--- a/include/lib/xlat_tables/xlat_tables_defs.h
+++ b/include/lib/xlat_tables/xlat_tables_defs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,6 +9,7 @@
#include <arch.h>
#include <utils_def.h>
+#include <xlat_mmu_helpers.h>
/* Miscellaneous MMU related constants */
#define NUM_2MB_IN_GB (U(1) << 9)
@@ -165,16 +166,4 @@
#define XN_SHIFT 54
#define UXN_SHIFT XN_SHIFT
-/*
- * Flags to override default values used to program system registers while
- * enabling the MMU.
- */
-#define DISABLE_DCACHE (U(1) << 0)
-
-/*
- * This flag marks the translation tables are Non-cacheable for MMU accesses.
- * If the flag is not specified, by default the tables are cacheable.
- */
-#define XLAT_TABLE_NC (U(1) << 1)
-
#endif /* __XLAT_TABLES_DEFS_H__ */
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 946f988..12ea304 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -6,108 +6,9 @@
#include <arch.h>
#include <asm_macros.S>
-#include <bl_common.h>
-#include <cpu_macros.S>
-#include <plat_macros.S>
#include <cortex_a75.h>
-
- .globl cortex_a75_amu_cnt_read
- .globl cortex_a75_amu_cnt_write
- .globl cortex_a75_amu_read_cpuamcntenset_el0
- .globl cortex_a75_amu_read_cpuamcntenclr_el0
- .globl cortex_a75_amu_write_cpuamcntenset_el0
- .globl cortex_a75_amu_write_cpuamcntenclr_el0
-
-/*
- * uint64_t cortex_a75_amu_cnt_read(int idx);
- *
- * Given `idx`, read the corresponding AMU counter
- * and return it in `x0`.
- */
-func cortex_a75_amu_cnt_read
- adr x1, 1f
- lsl x0, x0, #3
- add x1, x1, x0
- br x1
-
-1:
- mrs x0, CPUAMEVCNTR0_EL0
- ret
- mrs x0, CPUAMEVCNTR1_EL0
- ret
- mrs x0, CPUAMEVCNTR2_EL0
- ret
- mrs x0, CPUAMEVCNTR3_EL0
- ret
- mrs x0, CPUAMEVCNTR4_EL0
- ret
-endfunc cortex_a75_amu_cnt_read
-
-/*
- * void cortex_a75_amu_cnt_write(int idx, uint64_t val);
- *
- * Given `idx`, write `val` to the corresponding AMU counter.
- */
-func cortex_a75_amu_cnt_write
- adr x2, 1f
- lsl x0, x0, #3
- add x2, x2, x0
- br x2
-
-1:
- msr CPUAMEVCNTR0_EL0, x0
- ret
- msr CPUAMEVCNTR1_EL0, x0
- ret
- msr CPUAMEVCNTR2_EL0, x0
- ret
- msr CPUAMEVCNTR3_EL0, x0
- ret
- msr CPUAMEVCNTR4_EL0, x0
- ret
-endfunc cortex_a75_amu_cnt_write
-
-/*
- * unsigned int cortex_a75_amu_read_cpuamcntenset_el0(void);
- *
- * Read the `CPUAMCNTENSET_EL0` CPU register and return
- * it in `x0`.
- */
-func cortex_a75_amu_read_cpuamcntenset_el0
- mrs x0, CPUAMCNTENSET_EL0
- ret
-endfunc cortex_a75_amu_read_cpuamcntenset_el0
-
-/*
- * unsigned int cortex_a75_amu_read_cpuamcntenclr_el0(void);
- *
- * Read the `CPUAMCNTENCLR_EL0` CPU register and return
- * it in `x0`.
- */
-func cortex_a75_amu_read_cpuamcntenclr_el0
- mrs x0, CPUAMCNTENCLR_EL0
- ret
-endfunc cortex_a75_amu_read_cpuamcntenclr_el0
-
-/*
- * void cortex_a75_amu_write_cpuamcntenset_el0(unsigned int mask);
- *
- * Write `mask` to the `CPUAMCNTENSET_EL0` CPU register.
- */
-func cortex_a75_amu_write_cpuamcntenset_el0
- msr CPUAMCNTENSET_EL0, x0
- ret
-endfunc cortex_a75_amu_write_cpuamcntenset_el0
-
-/*
- * void cortex_a75_amu_write_cpuamcntenclr_el0(unsigned int mask);
- *
- * Write `mask` to the `CPUAMCNTENCLR_EL0` CPU register.
- */
-func cortex_a75_amu_write_cpuamcntenclr_el0
- mrs x0, CPUAMCNTENCLR_EL0
- ret
-endfunc cortex_a75_amu_write_cpuamcntenclr_el0
+#include <cpuamu.h>
+#include <cpu_macros.S>
func cortex_a75_reset_func
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
diff --git a/lib/cpus/aarch64/cortex_a75_pubsub.c b/lib/cpus/aarch64/cortex_a75_pubsub.c
index a1ffcb0..16f62f4 100644
--- a/lib/cpus/aarch64/cortex_a75_pubsub.c
+++ b/lib/cpus/aarch64/cortex_a75_pubsub.c
@@ -1,73 +1,24 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <cortex_a75.h>
-#include <platform.h>
+#include <cpuamu.h>
#include <pubsub_events.h>
-struct amu_ctx {
- uint64_t cnts[CORTEX_A75_AMU_NR_COUNTERS];
- uint16_t mask;
-};
-
-static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
-
static void *cortex_a75_context_save(const void *arg)
{
- struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
- unsigned int midr;
- unsigned int midr_mask;
- int i;
-
- midr = read_midr();
- midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
- (MIDR_PN_MASK << MIDR_PN_SHIFT);
- if ((midr & midr_mask) != (CORTEX_A75_MIDR & midr_mask))
- return 0;
-
- /* Save counter configuration */
- ctx->mask = cortex_a75_amu_read_cpuamcntenset_el0();
-
- /* Ensure counters are disabled */
- cortex_a75_amu_write_cpuamcntenclr_el0(ctx->mask);
- isb();
-
- /* Save counters */
- for (i = 0; i < CORTEX_A75_AMU_NR_COUNTERS; i++)
- ctx->cnts[i] = cortex_a75_amu_cnt_read(i);
-
+ if (midr_match(CORTEX_A75_MIDR) != 0)
+ cpuamu_context_save(CORTEX_A75_AMU_NR_COUNTERS);
return 0;
}
static void *cortex_a75_context_restore(const void *arg)
{
- struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
- unsigned int midr;
- unsigned int midr_mask;
- int i;
-
- midr = read_midr();
- midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
- (MIDR_PN_MASK << MIDR_PN_SHIFT);
- if ((midr & midr_mask) != (CORTEX_A75_MIDR & midr_mask))
- return 0;
-
- ctx = &amu_ctxs[plat_my_core_pos()];
-
- /* Counters were disabled in `cortex_a75_context_save()` */
- assert(cortex_a75_amu_read_cpuamcntenset_el0() == 0);
-
- /* Restore counters */
- for (i = 0; i < CORTEX_A75_AMU_NR_COUNTERS; i++)
- cortex_a75_amu_cnt_write(i, ctx->cnts[i]);
- isb();
-
- /* Restore counter configuration */
- cortex_a75_amu_write_cpuamcntenset_el0(ctx->mask);
-
+ if (midr_match(CORTEX_A75_MIDR) != 0)
+ cpuamu_context_restore(CORTEX_A75_AMU_NR_COUNTERS);
return 0;
}
diff --git a/lib/cpus/aarch64/cpuamu.c b/lib/cpus/aarch64/cpuamu.c
new file mode 100644
index 0000000..b9bad86
--- /dev/null
+++ b/lib/cpus/aarch64/cpuamu.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <cpuamu.h>
+#include <platform.h>
+#include <pubsub_events.h>
+
+#define CPUAMU_NR_COUNTERS 5U
+
+struct amu_ctx {
+ uint64_t cnts[CPUAMU_NR_COUNTERS];
+ unsigned int mask;
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+
+int midr_match(unsigned int cpu_midr)
+{
+ unsigned int midr, midr_mask;
+
+ midr = (unsigned int)read_midr();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ return ((midr & midr_mask) == (cpu_midr & midr_mask));
+}
+
+void cpuamu_context_save(unsigned int nr_counters)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int i;
+
+ assert(nr_counters <= CPUAMU_NR_COUNTERS);
+
+ /* Save counter configuration */
+ ctx->mask = cpuamu_read_cpuamcntenset_el0();
+
+ /* Disable counters */
+ cpuamu_write_cpuamcntenclr_el0(ctx->mask);
+ isb();
+
+ /* Save counters */
+ for (i = 0; i < nr_counters; i++)
+ ctx->cnts[i] = cpuamu_cnt_read(i);
+}
+
+void cpuamu_context_restore(unsigned int nr_counters)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int i;
+
+ assert(nr_counters <= CPUAMU_NR_COUNTERS);
+
+ /*
+ * Disable counters. They were enabled early in the
+ * CPU reset function.
+ */
+ cpuamu_write_cpuamcntenclr_el0(ctx->mask);
+ isb();
+
+ /* Restore counters */
+ for (i = 0; i < nr_counters; i++)
+ cpuamu_cnt_write(i, ctx->cnts[i]);
+ isb();
+
+ /* Restore counter configuration */
+ cpuamu_write_cpuamcntenset_el0(ctx->mask);
+}
diff --git a/lib/cpus/aarch64/cpuamu_helpers.S b/lib/cpus/aarch64/cpuamu_helpers.S
new file mode 100644
index 0000000..8965d6d
--- /dev/null
+++ b/lib/cpus/aarch64/cpuamu_helpers.S
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpuamu.h>
+
+ .globl cpuamu_cnt_read
+ .globl cpuamu_cnt_write
+ .globl cpuamu_read_cpuamcntenset_el0
+ .globl cpuamu_read_cpuamcntenclr_el0
+ .globl cpuamu_write_cpuamcntenset_el0
+ .globl cpuamu_write_cpuamcntenclr_el0
+
+/*
+ * uint64_t cpuamu_cnt_read(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func cpuamu_cnt_read
+ adr x1, 1f
+ lsl x0, x0, #3
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, CPUAMEVCNTR0_EL0
+ ret
+ mrs x0, CPUAMEVCNTR1_EL0
+ ret
+ mrs x0, CPUAMEVCNTR2_EL0
+ ret
+ mrs x0, CPUAMEVCNTR3_EL0
+ ret
+ mrs x0, CPUAMEVCNTR4_EL0
+ ret
+endfunc cpuamu_cnt_read
+
+/*
+ * void cpuamu_cnt_write(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func cpuamu_cnt_write
+ adr x2, 1f
+ lsl x0, x0, #3
+ add x2, x2, x0
+ br x2
+
+1:
+ msr CPUAMEVCNTR0_EL0, x0
+ ret
+ msr CPUAMEVCNTR1_EL0, x0
+ ret
+ msr CPUAMEVCNTR2_EL0, x0
+ ret
+ msr CPUAMEVCNTR3_EL0, x0
+ ret
+ msr CPUAMEVCNTR4_EL0, x0
+ ret
+endfunc cpuamu_cnt_write
+
+/*
+ * unsigned int cpuamu_read_cpuamcntenset_el0(void);
+ *
+ * Read the `CPUAMCNTENSET_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cpuamu_read_cpuamcntenset_el0
+ mrs x0, CPUAMCNTENSET_EL0
+ ret
+endfunc cpuamu_read_cpuamcntenset_el0
+
+/*
+ * unsigned int cpuamu_read_cpuamcntenclr_el0(void);
+ *
+ * Read the `CPUAMCNTENCLR_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cpuamu_read_cpuamcntenclr_el0
+ mrs x0, CPUAMCNTENCLR_EL0
+ ret
+endfunc cpuamu_read_cpuamcntenclr_el0
+
+/*
+ * void cpuamu_write_cpuamcntenset_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENSET_EL0` CPU register.
+ */
+func cpuamu_write_cpuamcntenset_el0
+ msr CPUAMCNTENSET_EL0, x0
+ ret
+endfunc cpuamu_write_cpuamcntenset_el0
+
+/*
+ * void cpuamu_write_cpuamcntenclr_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENCLR_EL0` CPU register.
+ */
+func cpuamu_write_cpuamcntenclr_el0
+ msr CPUAMCNTENCLR_EL0, x0
+ ret
+endfunc cpuamu_write_cpuamcntenclr_el0
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index 68cc4b3..05c98f1 100644
--- a/lib/extensions/amu/aarch32/amu.c
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -30,7 +30,7 @@
void amu_enable(int el2_unused)
{
- if (!amu_supported())
+ if (amu_supported() == 0)
return;
if (el2_unused) {
@@ -54,7 +54,7 @@
/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(int idx)
{
- assert(amu_supported());
+ assert(amu_supported() != 0);
assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
return amu_group0_cnt_read_internal(idx);
@@ -63,7 +63,7 @@
/* Write the group 0 counter identified by the given `idx` with `val`. */
void amu_group0_cnt_write(int idx, uint64_t val)
{
- assert(amu_supported());
+ assert(amu_supported() != 0);
assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
amu_group0_cnt_write_internal(idx, val);
@@ -73,7 +73,7 @@
/* Read the group 1 counter identified by the given `idx`. */
uint64_t amu_group1_cnt_read(int idx)
{
- assert(amu_supported());
+ assert(amu_supported() != 0);
assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
return amu_group1_cnt_read_internal(idx);
@@ -82,7 +82,7 @@
/* Write the group 1 counter identified by the given `idx` with `val`. */
void amu_group1_cnt_write(int idx, uint64_t val)
{
- assert(amu_supported());
+ assert(amu_supported() != 0);
assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_cnt_write_internal(idx, val);
@@ -91,7 +91,7 @@
void amu_group1_set_evtype(int idx, unsigned int val)
{
- assert(amu_supported());
+ assert(amu_supported() != 0);
assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_set_evtype_internal(idx, val);
@@ -103,13 +103,14 @@
struct amu_ctx *ctx;
int i;
- if (!amu_supported())
+ if (amu_supported() == 0)
return (void *)-1;
ctx = &amu_ctxs[plat_my_core_pos()];
- /* Assert that group 0 counter configuration is what we expect */
+ /* Assert that group 0/1 counter configuration is what we expect */
- assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK);
+ assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK &&
+ read_amcntenset1() == AMU_GROUP1_COUNTERS_MASK);
/*
* Disable group 0 counters to avoid other observers like SCP sampling
@@ -131,17 +132,15 @@
static void *amu_context_restore(const void *arg)
{
struct amu_ctx *ctx;
- uint64_t features;
int i;
- features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
- if ((features & ID_PFR0_AMU_MASK) != 1)
+ if (amu_supported() == 0)
return (void *)-1;
ctx = &amu_ctxs[plat_my_core_pos()];
/* Counters were disabled in `amu_context_save()` */
- assert(read_amcntenset0() == 0);
+ assert(read_amcntenset0() == 0 && read_amcntenset1() == 0);
/* Restore group 0 counters */
for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
diff --git a/lib/extensions/amu/aarch32/amu_helpers.S b/lib/extensions/amu/aarch32/amu_helpers.S
index 84dca04..effb8e5 100644
--- a/lib/extensions/amu/aarch32/amu_helpers.S
+++ b/lib/extensions/amu/aarch32/amu_helpers.S
@@ -18,7 +18,7 @@
* uint64_t amu_group0_cnt_read_internal(int idx);
*
* Given `idx`, read the corresponding AMU counter
- * and return it in `r0`.
+ * and return it in `r0` and `r1`.
*/
func amu_group0_cnt_read_internal
#if ENABLE_ASSERTIONS
@@ -52,13 +52,15 @@
* void amu_group0_cnt_write_internal(int idx, uint64_t val);
*
* Given `idx`, write `val` to the corresponding AMU counter.
+ * `idx` is passed in `r0` and `val` is passed in `r2` and `r3`.
+ * `r1` is used as a scratch register.
*/
func amu_group0_cnt_write_internal
#if ENABLE_ASSERTIONS
/* `idx` should be between [0, 3] */
- mov r2, r0
- lsr r2, r2, #2
- cmp r2, #0
+ mov r1, r0
+ lsr r1, r1, #2
+ cmp r1, #0
ASM_ASSERT(eq)
#endif
@@ -66,19 +68,19 @@
* Given `idx` calculate address of stcopr16/bx lr instruction pair
* in the table below.
*/
- adr r2, 1f
+ adr r1, 1f
lsl r0, r0, #3 /* each stcopr16/bx lr sequence is 8 bytes */
- add r2, r2, r0
- bx r2
+ add r1, r1, r0
+ bx r1
1:
- stcopr16 r0,r1, AMEVCNTR00 /* index 0 */
+ stcopr16 r2, r3, AMEVCNTR00 /* index 0 */
bx lr
- stcopr16 r0,r1, AMEVCNTR01 /* index 1 */
+ stcopr16 r2, r3, AMEVCNTR01 /* index 1 */
bx lr
- stcopr16 r0,r1, AMEVCNTR02 /* index 2 */
+ stcopr16 r2, r3, AMEVCNTR02 /* index 2 */
bx lr
- stcopr16 r0,r1, AMEVCNTR03 /* index 3 */
+ stcopr16 r2, r3, AMEVCNTR03 /* index 3 */
bx lr
endfunc amu_group0_cnt_write_internal
@@ -86,14 +88,14 @@
* uint64_t amu_group1_cnt_read_internal(int idx);
*
* Given `idx`, read the corresponding AMU counter
- * and return it in `r0`.
+ * and return it in `r0` and `r1`.
*/
func amu_group1_cnt_read_internal
#if ENABLE_ASSERTIONS
/* `idx` should be between [0, 15] */
- mov r2, r0
- lsr r2, r2, #4
- cmp r2, #0
+ mov r1, r0
+ lsr r1, r1, #4
+ cmp r1, #0
ASM_ASSERT(eq)
#endif
@@ -107,51 +109,53 @@
bx r1
1:
- ldcopr16 r0,r1, AMEVCNTR10 /* index 0 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR11 /* index 1 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR12 /* index 2 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR13 /* index 3 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR14 /* index 4 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR15 /* index 5 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR16 /* index 6 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR17 /* index 7 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR18 /* index 8 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR19 /* index 9 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR1A /* index 10 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR1B /* index 11 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR1C /* index 12 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR1D /* index 13 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR1E /* index 14 */
- bx lr
- ldcopr16 r0,r1, AMEVCNTR1F /* index 15 */
- bx lr
+ ldcopr16 r0, r1, AMEVCNTR10 /* index 0 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR11 /* index 1 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR12 /* index 2 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR13 /* index 3 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR14 /* index 4 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR15 /* index 5 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR16 /* index 6 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR17 /* index 7 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR18 /* index 8 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR19 /* index 9 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR1A /* index 10 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR1B /* index 11 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR1C /* index 12 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR1D /* index 13 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR1E /* index 14 */
+ bx lr
+ ldcopr16 r0, r1, AMEVCNTR1F /* index 15 */
+ bx lr
endfunc amu_group1_cnt_read_internal
/*
* void amu_group1_cnt_write_internal(int idx, uint64_t val);
*
* Given `idx`, write `val` to the corresponding AMU counter.
+ * `idx` is passed in `r0` and `val` is passed in `r2` and `r3`.
+ * `r1` is used as a scratch register.
*/
func amu_group1_cnt_write_internal
#if ENABLE_ASSERTIONS
/* `idx` should be between [0, 15] */
- mov r2, r0
- lsr r2, r2, #4
- cmp r2, #0
+ mov r1, r0
+ lsr r1, r1, #4
+ cmp r1, #0
ASM_ASSERT(eq)
#endif
@@ -159,43 +163,43 @@
* Given `idx` calculate address of ldcopr16/bx lr instruction pair
* in the table below.
*/
- adr r2, 1f
+ adr r1, 1f
lsl r0, r0, #3 /* each stcopr16/bx lr sequence is 8 bytes */
- add r2, r2, r0
- bx r2
+ add r1, r1, r0
+ bx r1
1:
- stcopr16 r0,r1, AMEVCNTR10 /* index 0 */
+ stcopr16 r2, r3, AMEVCNTR10 /* index 0 */
bx lr
- stcopr16 r0,r1, AMEVCNTR11 /* index 1 */
+ stcopr16 r2, r3, AMEVCNTR11 /* index 1 */
bx lr
- stcopr16 r0,r1, AMEVCNTR12 /* index 2 */
+ stcopr16 r2, r3, AMEVCNTR12 /* index 2 */
bx lr
- stcopr16 r0,r1, AMEVCNTR13 /* index 3 */
+ stcopr16 r2, r3, AMEVCNTR13 /* index 3 */
bx lr
- stcopr16 r0,r1, AMEVCNTR14 /* index 4 */
+ stcopr16 r2, r3, AMEVCNTR14 /* index 4 */
bx lr
- stcopr16 r0,r1, AMEVCNTR15 /* index 5 */
+ stcopr16 r2, r3, AMEVCNTR15 /* index 5 */
bx lr
- stcopr16 r0,r1, AMEVCNTR16 /* index 6 */
+ stcopr16 r2, r3, AMEVCNTR16 /* index 6 */
bx lr
- stcopr16 r0,r1, AMEVCNTR17 /* index 7 */
+ stcopr16 r2, r3, AMEVCNTR17 /* index 7 */
bx lr
- stcopr16 r0,r1, AMEVCNTR18 /* index 8 */
+ stcopr16 r2, r3, AMEVCNTR18 /* index 8 */
bx lr
- stcopr16 r0,r1, AMEVCNTR19 /* index 9 */
+ stcopr16 r2, r3, AMEVCNTR19 /* index 9 */
bx lr
- stcopr16 r0,r1, AMEVCNTR1A /* index 10 */
+ stcopr16 r2, r3, AMEVCNTR1A /* index 10 */
bx lr
- stcopr16 r0,r1, AMEVCNTR1B /* index 11 */
+ stcopr16 r2, r3, AMEVCNTR1B /* index 11 */
bx lr
- stcopr16 r0,r1, AMEVCNTR1C /* index 12 */
+ stcopr16 r2, r3, AMEVCNTR1C /* index 12 */
bx lr
- stcopr16 r0,r1, AMEVCNTR1D /* index 13 */
+ stcopr16 r2, r3, AMEVCNTR1D /* index 13 */
bx lr
- stcopr16 r0,r1, AMEVCNTR1E /* index 14 */
+ stcopr16 r2, r3, AMEVCNTR1E /* index 14 */
bx lr
- stcopr16 r0,r1, AMEVCNTR1F /* index 15 */
+ stcopr16 r2, r3, AMEVCNTR1F /* index 15 */
bx lr
endfunc amu_group1_cnt_write_internal
@@ -230,36 +234,36 @@
bx r2
1:
- stcopr r0, AMEVTYPER10 /* index 0 */
+ stcopr r1, AMEVTYPER10 /* index 0 */
bx lr
- stcopr r0, AMEVTYPER11 /* index 1 */
+ stcopr r1, AMEVTYPER11 /* index 1 */
bx lr
- stcopr r0, AMEVTYPER12 /* index 2 */
+ stcopr r1, AMEVTYPER12 /* index 2 */
bx lr
- stcopr r0, AMEVTYPER13 /* index 3 */
+ stcopr r1, AMEVTYPER13 /* index 3 */
bx lr
- stcopr r0, AMEVTYPER14 /* index 4 */
+ stcopr r1, AMEVTYPER14 /* index 4 */
bx lr
- stcopr r0, AMEVTYPER15 /* index 5 */
+ stcopr r1, AMEVTYPER15 /* index 5 */
bx lr
- stcopr r0, AMEVTYPER16 /* index 6 */
+ stcopr r1, AMEVTYPER16 /* index 6 */
bx lr
- stcopr r0, AMEVTYPER17 /* index 7 */
+ stcopr r1, AMEVTYPER17 /* index 7 */
bx lr
- stcopr r0, AMEVTYPER18 /* index 8 */
+ stcopr r1, AMEVTYPER18 /* index 8 */
bx lr
- stcopr r0, AMEVTYPER19 /* index 9 */
+ stcopr r1, AMEVTYPER19 /* index 9 */
bx lr
- stcopr r0, AMEVTYPER1A /* index 10 */
+ stcopr r1, AMEVTYPER1A /* index 10 */
bx lr
- stcopr r0, AMEVTYPER1B /* index 11 */
+ stcopr r1, AMEVTYPER1B /* index 11 */
bx lr
- stcopr r0, AMEVTYPER1C /* index 12 */
+ stcopr r1, AMEVTYPER1C /* index 12 */
bx lr
- stcopr r0, AMEVTYPER1D /* index 13 */
+ stcopr r1, AMEVTYPER1D /* index 13 */
bx lr
- stcopr r0, AMEVTYPER1E /* index 14 */
+ stcopr r1, AMEVTYPER1E /* index 14 */
bx lr
- stcopr r0, AMEVTYPER1F /* index 15 */
+ stcopr r1, AMEVTYPER1F /* index 15 */
bx lr
endfunc amu_group1_set_evtype_internal
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 7d39f35..5d556e5 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -37,7 +37,7 @@
{
uint64_t v;
- if (!amu_supported())
+ if (amu_supported() == 0)
return;
if (el2_unused) {
@@ -67,7 +67,7 @@
/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(int idx)
{
- assert(amu_supported());
+ assert(amu_supported() != 0);
assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
return amu_group0_cnt_read_internal(idx);
@@ -76,7 +76,7 @@
/* Write the group 0 counter identified by the given `idx` with `val`. */
void amu_group0_cnt_write(int idx, uint64_t val)
{
- assert(amu_supported());
+ assert(amu_supported() != 0);
assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
amu_group0_cnt_write_internal(idx, val);
@@ -86,7 +86,7 @@
/* Read the group 1 counter identified by the given `idx`. */
uint64_t amu_group1_cnt_read(int idx)
{
- assert(amu_supported());
+ assert(amu_supported() != 0);
assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
return amu_group1_cnt_read_internal(idx);
@@ -95,7 +95,7 @@
/* Write the group 1 counter identified by the given `idx` with `val`. */
void amu_group1_cnt_write(int idx, uint64_t val)
{
- assert(amu_supported());
+ assert(amu_supported() != 0);
assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_cnt_write_internal(idx, val);
@@ -108,7 +108,7 @@
*/
void amu_group1_set_evtype(int idx, unsigned int val)
{
- assert(amu_supported());
+ assert(amu_supported() != 0);
assert (idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_set_evtype_internal(idx, val);
@@ -120,7 +120,7 @@
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
int i;
- if (!amu_supported())
+ if (amu_supported() == 0)
return (void *)-1;
/* Assert that group 0/1 counter configuration is what we expect */
@@ -154,7 +154,7 @@
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
int i;
- if (!amu_supported())
+ if (amu_supported() == 0)
return (void *)-1;
/* Counters were disabled in `amu_context_save()` */
diff --git a/lib/extensions/spe/spe.c b/lib/extensions/spe/spe.c
index 3b297f2..a9bed49 100644
--- a/lib/extensions/spe/spe.c
+++ b/lib/extensions/spe/spe.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,71 +14,72 @@
*/
#define psb_csync() asm volatile("hint #17")
-void spe_enable(int el2_unused)
+int spe_supported(void)
{
uint64_t features;
features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
- if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
- uint64_t v;
+ return (features & ID_AA64DFR0_PMS_MASK) == 1;
+}
- if (el2_unused) {
- /*
- * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
- * profiling controls to EL2.
- *
- * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in Non-secure
- * state. Accesses to profiling buffer controls at
- * Non-secure EL1 are not trapped to EL2.
- */
- v = read_mdcr_el2();
- v &= ~MDCR_EL2_TPMS;
- v |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
- write_mdcr_el2(v);
- }
+void spe_enable(int el2_unused)
+{
+ uint64_t v;
+ if (spe_supported() == 0)
+ return;
+
+ if (el2_unused) {
/*
- * MDCR_EL2.NSPB (ARM v8.2): SPE enabled in Non-secure state
- * and disabled in secure state. Accesses to SPE registers at
- * S-EL1 generate trap exceptions to EL3.
+ * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
+ * profiling controls to EL2.
+ *
+ * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in Non-secure
+ * state. Accesses to profiling buffer controls at
+ * Non-secure EL1 are not trapped to EL2.
*/
- v = read_mdcr_el3();
- v |= MDCR_NSPB(MDCR_NSPB_EL1);
- write_mdcr_el3(v);
+ v = read_mdcr_el2();
+ v &= ~MDCR_EL2_TPMS;
+ v |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
+ write_mdcr_el2(v);
}
+
+ /*
+ * MDCR_EL2.NSPB (ARM v8.2): SPE enabled in Non-secure state
+ * and disabled in secure state. Accesses to SPE registers at
+ * S-EL1 generate trap exceptions to EL3.
+ */
+ v = read_mdcr_el3();
+ v |= MDCR_NSPB(MDCR_NSPB_EL1);
+ write_mdcr_el3(v);
}
void spe_disable(void)
{
- uint64_t features;
+ uint64_t v;
- features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
- if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
- uint64_t v;
+ if (spe_supported() == 0)
+ return;
- /* Drain buffered data */
- psb_csync();
- dsbnsh();
+ /* Drain buffered data */
+ psb_csync();
+ dsbnsh();
- /* Disable profiling buffer */
- v = read_pmblimitr_el1();
- v &= ~(1ULL << 0);
- write_pmblimitr_el1(v);
- isb();
- }
+ /* Disable profiling buffer */
+ v = read_pmblimitr_el1();
+ v &= ~(1ULL << 0);
+ write_pmblimitr_el1(v);
+ isb();
}
static void *spe_drain_buffers_hook(const void *arg)
{
- uint64_t features;
-
- features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
- if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
- /* Drain buffered data */
- psb_csync();
- dsbnsh();
- }
+ if (spe_supported() == 0)
+ return (void *)-1;
+ /* Drain buffered data */
+ psb_csync();
+ dsbnsh();
return 0;
}
diff --git a/lib/extensions/sve/sve.c b/lib/extensions/sve/sve.c
index 14e51bd..6442487 100644
--- a/lib/extensions/sve/sve.c
+++ b/lib/extensions/sve/sve.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,117 +9,120 @@
#include <pubsub.h>
#include <sve.h>
-static void *disable_sve_hook(const void *arg)
+int sve_supported(void)
{
uint64_t features;
features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
- if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
- uint64_t cptr;
+ return (features & ID_AA64PFR0_SVE_MASK) == 1;
+}
- /*
- * Disable SVE, SIMD and FP access for the Secure world.
- * As the SIMD/FP registers are part of the SVE Z-registers, any
- * use of SIMD/FP functionality will corrupt the SVE registers.
- * Therefore it is necessary to prevent use of SIMD/FP support
- * in the Secure world as well as SVE functionality.
- */
- cptr = read_cptr_el3();
- cptr = (cptr | TFP_BIT) & ~(CPTR_EZ_BIT);
- write_cptr_el3(cptr);
+static void *disable_sve_hook(const void *arg)
+{
+ uint64_t cptr;
- /*
- * No explicit ISB required here as ERET to switch to Secure
- * world covers it
- */
- }
+ if (sve_supported() == 0)
+ return (void *)-1;
+
+ /*
+ * Disable SVE, SIMD and FP access for the Secure world.
+ * As the SIMD/FP registers are part of the SVE Z-registers, any
+ * use of SIMD/FP functionality will corrupt the SVE registers.
+ * Therefore it is necessary to prevent use of SIMD/FP support
+ * in the Secure world as well as SVE functionality.
+ */
+ cptr = read_cptr_el3();
+ cptr = (cptr | TFP_BIT) & ~(CPTR_EZ_BIT);
+ write_cptr_el3(cptr);
+
+ /*
+ * No explicit ISB required here as ERET to switch to Secure
+ * world covers it
+ */
return 0;
}
static void *enable_sve_hook(const void *arg)
{
- uint64_t features;
+ uint64_t cptr;
- features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
- if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
- uint64_t cptr;
+ if (sve_supported() == 0)
+ return (void *)-1;
- /*
- * Enable SVE, SIMD and FP access for the Non-secure world.
- */
- cptr = read_cptr_el3();
- cptr = (cptr | CPTR_EZ_BIT) & ~(TFP_BIT);
- write_cptr_el3(cptr);
+ /*
+ * Enable SVE, SIMD and FP access for the Non-secure world.
+ */
+ cptr = read_cptr_el3();
+ cptr = (cptr | CPTR_EZ_BIT) & ~(TFP_BIT);
+ write_cptr_el3(cptr);
- /*
- * No explicit ISB required here as ERET to switch to Non-secure
- * world covers it
- */
- }
+ /*
+ * No explicit ISB required here as ERET to switch to Non-secure
+ * world covers it
+ */
return 0;
}
void sve_enable(int el2_unused)
{
- uint64_t features;
+ uint64_t cptr;
- features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
- if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
- uint64_t cptr;
+ if (sve_supported() == 0)
+ return;
+
#if CTX_INCLUDE_FPREGS
- /*
- * CTX_INCLUDE_FPREGS is not supported on SVE enabled systems.
- */
- assert(0);
+ /*
+ * CTX_INCLUDE_FPREGS is not supported on SVE enabled systems.
+ */
+ assert(0);
#endif
- /*
- * Update CPTR_EL3 to enable access to SVE functionality for the
- * Non-secure world.
- * NOTE - assumed that CPTR_EL3.TFP is set to allow access to
- * the SIMD, floating-point and SVE support.
- *
- * CPTR_EL3.EZ: Set to 1 to enable access to SVE functionality
- * in the Non-secure world.
- */
- cptr = read_cptr_el3();
- cptr |= CPTR_EZ_BIT;
- write_cptr_el3(cptr);
+ /*
+ * Update CPTR_EL3 to enable access to SVE functionality for the
+ * Non-secure world.
+ * NOTE - assumed that CPTR_EL3.TFP is set to allow access to
+ * the SIMD, floating-point and SVE support.
+ *
+ * CPTR_EL3.EZ: Set to 1 to enable access to SVE functionality
+ * in the Non-secure world.
+ */
+ cptr = read_cptr_el3();
+ cptr |= CPTR_EZ_BIT;
+ write_cptr_el3(cptr);
- /*
- * Need explicit ISB here to guarantee that update to ZCR_ELx
- * and CPTR_EL2.TZ do not result in trap to EL3.
- */
- isb();
+ /*
+ * Need explicit ISB here to guarantee that update to ZCR_ELx
+ * and CPTR_EL2.TZ do not result in trap to EL3.
+ */
+ isb();
+
+ /*
+ * Ensure lower ELs have access to full vector length.
+ */
+ write_zcr_el3(ZCR_EL3_LEN_MASK);
+ if (el2_unused) {
/*
- * Ensure lower ELs have access to full vector length.
+ * Update CPTR_EL2 to enable access to SVE functionality
+ * for Non-secure world, EL2 and Non-secure EL1 and EL0.
+ * NOTE - assumed that CPTR_EL2.TFP is set to allow
+ * access to the SIMD, floating-point and SVE support.
+ *
+ * CPTR_EL2.TZ: Set to 0 to enable access to SVE support
+ * for EL2 and Non-secure EL1 and EL0.
*/
- write_zcr_el3(ZCR_EL3_LEN_MASK);
+ cptr = read_cptr_el2();
+ cptr &= ~(CPTR_EL2_TZ_BIT);
+ write_cptr_el2(cptr);
- if (el2_unused) {
- /*
- * Update CPTR_EL2 to enable access to SVE functionality
- * for Non-secure world, EL2 and Non-secure EL1 and EL0.
- * NOTE - assumed that CPTR_EL2.TFP is set to allow
- * access to the SIMD, floating-point and SVE support.
- *
- * CPTR_EL2.TZ: Set to 0 to enable access to SVE support
- * for EL2 and Non-secure EL1 and EL0.
- */
- cptr = read_cptr_el2();
- cptr &= ~(CPTR_EL2_TZ_BIT);
- write_cptr_el2(cptr);
-
- /*
- * Ensure lower ELs have access to full vector length.
- */
- write_zcr_el2(ZCR_EL2_LEN_MASK);
- }
/*
- * No explicit ISB required here as ERET to switch to
- * Non-secure world covers it.
+ * Ensure lower ELs have access to full vector length.
*/
+ write_zcr_el2(ZCR_EL2_LEN_MASK);
}
+ /*
+ * No explicit ISB required here as ERET to switch to
+ * Non-secure world covers it.
+ */
}
SUBSCRIBE_TO_EVENT(cm_exited_normal_world, disable_sve_hook);
diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S
index 9373d4f..a29a29c 100644
--- a/lib/psci/aarch32/psci_helpers.S
+++ b/lib/psci/aarch32/psci_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -91,6 +91,28 @@
stcopr r0, SCTLR
isb
+#if PLAT_XLAT_TABLES_DYNAMIC
+ /* ---------------------------------------------
+ * During warm boot the MMU is enabled with data
+ * cache disabled, then the interconnect is set
+ * up and finally the data cache is enabled.
+ *
+ * During this period, if another CPU modifies
+ * the translation tables, the MMU table walker
+ * may read the old entries. This is only a
+ * problem for dynamic regions; the warm boot
+ * code isn't affected because it is static.
+ *
+ * Invalidate all TLB entries loaded while the
+ * CPU wasn't coherent with the rest of the
+ * system.
+ * ---------------------------------------------
+ */
+ stcopr r0, TLBIALL
+ dsb ish
+ isb
+#endif
+
pop {r12, pc}
endfunc psci_do_pwrup_cache_maintenance
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
index afe21eb..d37ca76 100644
--- a/lib/psci/aarch64/psci_helpers.S
+++ b/lib/psci/aarch64/psci_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -115,6 +115,28 @@
msr sctlr_el3, x0
isb
+#if PLAT_XLAT_TABLES_DYNAMIC
+ /* ---------------------------------------------
+ * During warm boot the MMU is enabled with data
+ * cache disabled, then the interconnect is set
+ * up and finally the data cache is enabled.
+ *
+ * During this period, if another CPU modifies
+ * the translation tables, the MMU table walker
+ * may read the old entries. This is only a
+ * problem for dynamic regions; the warm boot
+ * code isn't affected because it is static.
+ *
+ * Invalidate all TLB entries loaded while the
+ * CPU wasn't coherent with the rest of the
+ * system.
+ * ---------------------------------------------
+ */
+ tlbi alle3
+ dsb ish
+ isb
+#endif
+
ldp x29, x30, [sp], #16
ret
endfunc psci_do_pwrup_cache_maintenance
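The race the new TLB invalidation closes is easiest to see with the xlat v2 dynamic API (only compiled when PLAT_XLAT_TABLES_DYNAMIC=1); the region below is purely illustrative:

    #include <xlat_tables_v2.h>

    /* A live CPU adds a mapping while a sibling is warm-booting. Any stale
     * walks the booting CPU cached before rejoining the coherency domain
     * are discarded by the tlbi alle3/TLBIALL added above. */
    int map_scratch_region_sketch(void)
    {
            return mmap_add_dynamic_region(0x88000000ULL,   /* PA, assumed */
                                           0x88000000,      /* VA, assumed */
                                           PAGE_SIZE,
                                           MT_MEMORY | MT_RW | MT_SECURE);
    }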
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 6857c1f..8b913fb 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -182,7 +182,9 @@
ENABLE_AMU := 1
ifeq (${ENABLE_AMU},1)
-BL31_SOURCES += lib/cpus/aarch64/cortex_a75_pubsub.c
+BL31_SOURCES += lib/cpus/aarch64/cortex_a75_pubsub.c \
+ lib/cpus/aarch64/cpuamu.c \
+ lib/cpus/aarch64/cpuamu_helpers.S
endif
ifneq (${ENABLE_STACK_PROTECTOR},0)
diff --git a/plat/mediatek/mt6795/bl31.ld.S b/plat/mediatek/mt6795/bl31.ld.S
index eacb1b2..0fbd3f7 100644
--- a/plat/mediatek/mt6795/bl31.ld.S
+++ b/plat/mediatek/mt6795/bl31.ld.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -132,7 +132,8 @@
/*
* The xlat_table section is for full, aligned page tables (4K).
* Removing them from .bss avoids forcing 4K alignment on
- * the .bss section and eliminates the unecessary zero init
+ * the .bss section. The tables are initialized to zero by the translation
+ * tables library.
*/
xlat_table (NOLOAD) : {
*(xlat_table)
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
index c564f8b..3d06e0b 100644
--- a/services/spd/tspd/tspd_main.c
+++ b/services/spd/tspd/tspd_main.c
@@ -549,9 +549,10 @@
* preempt Secure execution. However, for
* yielding SMCs, we want preemption to happen;
* so explicitly allow NS preemption in this
- * case.
+ * case, and supply the preemption return code
+ * for the TSP.
*/
- ehf_allow_ns_preemption();
+ ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif
}
@@ -662,9 +663,10 @@
#if EL3_EXCEPTION_HANDLING
/*
* Allow the resumed yielding SMC processing to be preempted by
- * Non-secure interrupts.
+ * Non-secure interrupts. Also, supply the preemption return
+ * code for the TSP.
*/
- ehf_allow_ns_preemption();
+ ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif
/* We just need to return to the preempted point in
diff --git a/services/std_svc/sdei/sdei_intr_mgmt.c b/services/std_svc/sdei/sdei_intr_mgmt.c
index 42bf46d..2717ea4 100644
--- a/services/std_svc/sdei/sdei_intr_mgmt.c
+++ b/services/std_svc/sdei/sdei_intr_mgmt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -556,7 +556,7 @@
unsigned int client_el = sdei_client_el();
/* Return error if called without an active event */
- disp_ctx = pop_dispatch();
+ disp_ctx = get_outstanding_dispatch();
if (!disp_ctx)
return SDEI_EDENY;
@@ -566,15 +566,8 @@
map = disp_ctx->map;
assert(map);
-
se = get_event_entry(map);
- SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
- map->ev_num, read_spsr_el3(), read_elr_el3());
-
- if (is_event_shared(map))
- sdei_map_lock(map);
-
act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
if (!can_sdei_state_trans(se, act)) {
if (is_event_shared(map))
@@ -582,6 +575,15 @@
return SDEI_EDENY;
}
+ /* Having done sanity checks, pop dispatch */
+ pop_dispatch();
+
+ SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
+ map->ev_num, read_spsr_el3(), read_elr_el3());
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
/*
* Restore Non-secure to how it was originally interrupted. Once done,
* it's up-to-date with the saved copy.