Merge pull request #1219 from antonio-nino-diaz-arm/an/mm-version
SPM: Fix version header definitions
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index d8fbb9b..9b7735f 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -14,6 +14,26 @@
.globl runtime_exceptions
+ .globl sync_exception_sp_el0
+ .globl irq_sp_el0
+ .globl fiq_sp_el0
+ .globl serror_sp_el0
+
+ .globl sync_exception_sp_elx
+ .globl irq_sp_elx
+ .globl fiq_sp_elx
+ .globl serror_sp_elx
+
+ .globl sync_exception_aarch64
+ .globl irq_aarch64
+ .globl fiq_aarch64
+ .globl serror_aarch64
+
+ .globl sync_exception_aarch32
+ .globl irq_aarch32
+ .globl fiq_aarch32
+ .globl serror_aarch32
+
/* ---------------------------------------------------------------------
* This macro handles Synchronous exceptions.
* Only SMC exceptions are supported.
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index fdcc931..2db4856 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -51,13 +51,19 @@
endif
ifeq (${ENABLE_AMU},1)
-BL31_SOURCES += lib/extensions/amu/aarch64/amu.c
+BL31_SOURCES += lib/extensions/amu/aarch64/amu.c \
+ lib/extensions/amu/aarch64/amu_helpers.S
endif
ifeq (${ENABLE_SVE_FOR_NS},1)
BL31_SOURCES += lib/extensions/sve/sve.c
endif
+ifeq (${WORKAROUND_CVE_2017_5715},1)
+BL31_SOURCES += lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S \
+ lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
+endif
+
BL31_LINKERFILE := bl31/bl31.ld.S
# Flag used to indicate if Crash reporting via console should be included
diff --git a/docs/cpu-specific-build-macros.rst b/docs/cpu-specific-build-macros.rst
index f74b459..014817d 100644
--- a/docs/cpu-specific-build-macros.rst
+++ b/docs/cpu-specific-build-macros.rst
@@ -11,6 +11,15 @@
operations framework to enable errata workarounds and to enable optimizations
for a specific CPU on a platform.
+Security Vulnerability Workarounds
+----------------------------------
+
+ARM Trusted Firmware exports a set of build flags that control which security
+vulnerability workarounds are applied at runtime.
+
+- ``WORKAROUND_CVE_2017_5715``: Enables the security workaround for
+ `CVE-2017-5715`_. Defaults to 1.
+
CPU Errata Workarounds
----------------------
@@ -142,6 +151,7 @@
*Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.*
+.. _CVE-2017-5715: http://www.cve.mitre.org/cgi-bin/cvename.cgi?name=2017-5715
.. _Cortex-A53 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm048406/Cortex_A53_MPCore_Software_Developers_Errata_Notice.pdf
.. _Cortex-A57 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm049219/cortex_a57_mpcore_software_developers_errata_notice.pdf
.. _Cortex-A72 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm012079/index.html
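Note on how the new flag is consumed: ``WORKAROUND_CVE_2017_5715`` is turned into a preprocessor define by the `add_define` call added to `lib/cpus/cpu-ops.mk` later in this patch, so both C and assembly sources can gate the mitigation at build time. A minimal sketch of that gating pattern (illustration only, not code taken verbatim from the patch); building with `WORKAROUND_CVE_2017_5715=0` on the make command line removes the extra vectors and context entirely:

    /*
     * Sketch only: the make-level WORKAROUND_CVE_2017_5715 flag becomes a
     * compile-time define via add_define in lib/cpus/cpu-ops.mk.
     */
    #if WORKAROUND_CVE_2017_5715
    /* alternate vector tables and the extra context region are built in */
    #endif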
diff --git a/docs/porting-guide.rst b/docs/porting-guide.rst
index f020ec9..2e2cc4f 100644
--- a/docs/porting-guide.rst
+++ b/docs/porting-guide.rst
@@ -549,6 +549,22 @@
doesn't print anything to the console. If ``PLAT_LOG_LEVEL_ASSERT`` isn't
defined, it defaults to ``LOG_LEVEL``.
+If the platform port uses the Activity Monitor Unit, the following constants
+may be defined:
+
+- **PLAT\_AMU\_GROUP1\_COUNTERS\_MASK**
+  This mask reflects the set of group 1 counters that should be enabled. The
+  maximum number of group 1 counters supported by AMUv1 is 16, so the mask
+  can be at most 0xffff. If the platform does not define this mask, no group 1
+  counters are enabled. If the platform defines this mask, the following
+  constant must also be defined.
+
+- **PLAT\_AMU\_GROUP1\_NR\_COUNTERS**
+ This value is used to allocate an array to save and restore the counters
+ specified by ``PLAT_AMU_GROUP1_COUNTERS_MASK`` on CPU suspend.
+ This value should be equal to the highest bit position set in the
+ mask, plus 1. The maximum number of group 1 counters in AMUv1 is 16.
+
File : plat\_macros.S [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1128,6 +1144,9 @@
for performing any remaining platform-specific setup that can occur after the
MMU and data cache have been enabled.
+If support for multiple boot sources is required, it initializes the boot
+sequence used by plat\_try\_next\_boot\_source().
+
In ARM standard platforms, this function initializes the storage abstraction
layer used to load the next bootloader image.
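The two AMU constants documented in the porting-guide hunk above are ordinary `platform_def.h` macros. A hypothetical example (not part of this patch) for a platform that enables group 1 counters 0 and 4: the highest set bit position is 4, so the save/restore array must hold 4 + 1 = 5 entries.

    /* Hypothetical platform_def.h fragment, for illustration only */
    #define PLAT_AMU_GROUP1_COUNTERS_MASK	0x11	/* counters 0 and 4 */
    #define PLAT_AMU_GROUP1_NR_COUNTERS	5	/* highest set bit position (4) + 1 */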
diff --git a/include/bl32/payloads/tlk.h b/include/bl32/payloads/tlk.h
index 4e06bcd..941b6cc 100644
--- a/include/bl32/payloads/tlk.h
+++ b/include/bl32/payloads/tlk.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -20,6 +20,7 @@
*/
#define TLK_REGISTER_LOGBUF TLK_TOS_YIELD_FID(0x1)
#define TLK_REGISTER_REQBUF TLK_TOS_YIELD_FID(0x2)
+#define TLK_REGISTER_NS_DRAM TLK_TOS_YIELD_FID(0x4)
#define TLK_RESUME_FID TLK_TOS_YIELD_FID(0x100)
#define TLK_SYSTEM_SUSPEND TLK_TOS_YIELD_FID(0xE001)
#define TLK_SYSTEM_RESUME TLK_TOS_YIELD_FID(0xE002)
diff --git a/include/common/aarch64/el3_common_macros.S b/include/common/aarch64/el3_common_macros.S
index 63a0fa7..defd4a2 100644
--- a/include/common/aarch64/el3_common_macros.S
+++ b/include/common/aarch64/el3_common_macros.S
@@ -13,7 +13,7 @@
/*
* Helper macro to initialise EL3 registers we care about.
*/
- .macro el3_arch_init_common _exception_vectors
+ .macro el3_arch_init_common
/* ---------------------------------------------------------------------
* SCTLR_EL3 has already been initialised - read current value before
* modifying.
@@ -50,14 +50,6 @@
#endif /* IMAGE_BL31 */
/* ---------------------------------------------------------------------
- * Set the exception vectors.
- * ---------------------------------------------------------------------
- */
- adr x0, \_exception_vectors
- msr vbar_el3, x0
- isb
-
- /* ---------------------------------------------------------------------
* Initialise SCR_EL3, setting all fields rather than relying on hw.
* All fields are architecturally UNKNOWN on reset. The following fields
* do not change during the TF lifetime. The remaining fields are set to
@@ -221,6 +213,14 @@
.endif /* _warm_boot_mailbox */
/* ---------------------------------------------------------------------
+ * Set the exception vectors.
+ * ---------------------------------------------------------------------
+ */
+ adr x0, \_exception_vectors
+ msr vbar_el3, x0
+ isb
+
+ /* ---------------------------------------------------------------------
* It is a cold boot.
* Perform any processor specific actions upon reset e.g. cache, TLB
* invalidations etc.
@@ -228,7 +228,7 @@
*/
bl reset_handler
- el3_arch_init_common \_exception_vectors
+ el3_arch_init_common
.if \_secondary_cold_boot
/* -------------------------------------------------------------
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
index 0230195..beae5d0 100644
--- a/include/lib/aarch32/arch_helpers.h
+++ b/include/lib/aarch32/arch_helpers.h
@@ -287,6 +287,11 @@
DEFINE_COPROCR_RW_FUNCS(amcntenclr0, AMCNTENCLR0)
DEFINE_COPROCR_RW_FUNCS(amcntenclr1, AMCNTENCLR1)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr00, AMEVCNTR00)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr01, AMEVCNTR01)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr02, AMEVCNTR02)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr03, AMEVCNTR03)
+
/*
* TLBI operation prototypes
*/
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index 96e2d5f..91aa484 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -117,6 +117,9 @@
#define ID_AA64PFR0_SVE_SHIFT U(32)
#define ID_AA64PFR0_SVE_MASK U(0xf)
#define ID_AA64PFR0_SVE_LENGTH U(4)
+#define ID_AA64PFR0_CSV2_SHIFT U(56)
+#define ID_AA64PFR0_CSV2_MASK U(0xf)
+#define ID_AA64PFR0_CSV2_LENGTH U(4)
/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
#define ID_AA64DFR0_PMS_SHIFT U(32)
@@ -337,6 +340,11 @@
#define SPSR_T_ARM U(0x0)
#define SPSR_T_THUMB U(0x1)
+#define SPSR_M_SHIFT U(4)
+#define SPSR_M_MASK U(0x1)
+#define SPSR_M_AARCH64 U(0x0)
+#define SPSR_M_AARCH32 U(0x1)
+
#define DISABLE_ALL_EXCEPTIONS \
(DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
@@ -656,4 +664,45 @@
#define AMEVTYPER02_EL0 S3_3_C13_C6_2
#define AMEVTYPER03_EL0 S3_3_C13_C6_3
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10_EL0 S3_3_C13_C12_0
+#define AMEVCNTR11_EL0 S3_3_C13_C12_1
+#define AMEVCNTR12_EL0 S3_3_C13_C12_2
+#define AMEVCNTR13_EL0 S3_3_C13_C12_3
+#define AMEVCNTR14_EL0 S3_3_C13_C12_4
+#define AMEVCNTR15_EL0 S3_3_C13_C12_5
+#define AMEVCNTR16_EL0 S3_3_C13_C12_6
+#define AMEVCNTR17_EL0 S3_3_C13_C12_7
+#define AMEVCNTR18_EL0 S3_3_C13_C13_0
+#define AMEVCNTR19_EL0 S3_3_C13_C13_1
+#define AMEVCNTR1A_EL0 S3_3_C13_C13_2
+#define AMEVCNTR1B_EL0 S3_3_C13_C13_3
+#define AMEVCNTR1C_EL0 S3_3_C13_C13_4
+#define AMEVCNTR1D_EL0 S3_3_C13_C13_5
+#define AMEVCNTR1E_EL0 S3_3_C13_C13_6
+#define AMEVCNTR1F_EL0 S3_3_C13_C13_7
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10_EL0 S3_3_C13_C14_0
+#define AMEVTYPER11_EL0 S3_3_C13_C14_1
+#define AMEVTYPER12_EL0 S3_3_C13_C14_2
+#define AMEVTYPER13_EL0 S3_3_C13_C14_3
+#define AMEVTYPER14_EL0 S3_3_C13_C14_4
+#define AMEVTYPER15_EL0 S3_3_C13_C14_5
+#define AMEVTYPER16_EL0 S3_3_C13_C14_6
+#define AMEVTYPER17_EL0 S3_3_C13_C14_7
+#define AMEVTYPER18_EL0 S3_3_C13_C15_0
+#define AMEVTYPER19_EL0 S3_3_C13_C15_1
+#define AMEVTYPER1A_EL0 S3_3_C13_C15_2
+#define AMEVTYPER1B_EL0 S3_3_C13_C15_3
+#define AMEVTYPER1C_EL0 S3_3_C13_C15_4
+#define AMEVTYPER1D_EL0 S3_3_C13_C15_5
+#define AMEVTYPER1E_EL0 S3_3_C13_C15_6
+#define AMEVTYPER1F_EL0 S3_3_C13_C15_7
+
+/* AMCGCR_EL0 definitions */
+#define AMCGCR_EL0_CG1NC_SHIFT U(8)
+#define AMCGCR_EL0_CG1NC_LENGTH U(8)
+#define AMCGCR_EL0_CG1NC_MASK U(0xff)
+
#endif /* __ARCH_H__ */
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 831dfb0..485ed43 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -322,6 +322,7 @@
DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcgcr_el0, AMCGCR_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
diff --git a/include/lib/cpus/aarch64/cortex_a75.h b/include/lib/cpus/aarch64/cortex_a75.h
index d68c957..940125d 100644
--- a/include/lib/cpus/aarch64/cortex_a75.h
+++ b/include/lib/cpus/aarch64/cortex_a75.h
@@ -50,7 +50,19 @@
* CPUAMEVTYPER<n> register and are disabled by default. Platforms may
* enable this with suitable programming.
*/
+#define CORTEX_A75_AMU_NR_COUNTERS 5
#define CORTEX_A75_AMU_GROUP0_MASK 0x7
#define CORTEX_A75_AMU_GROUP1_MASK (0 << 3)
+#ifndef __ASSEMBLY__
+#include <stdint.h>
+
+uint64_t cortex_a75_amu_cnt_read(int idx);
+void cortex_a75_amu_cnt_write(int idx, uint64_t val);
+unsigned int cortex_a75_amu_read_cpuamcntenset_el0(void);
+unsigned int cortex_a75_amu_read_cpuamcntenclr_el0(void);
+void cortex_a75_amu_write_cpuamcntenset_el0(unsigned int mask);
+void cortex_a75_amu_write_cpuamcntenclr_el0(unsigned int mask);
+#endif /* __ASSEMBLY__ */
+
#endif /* __CORTEX_A75_H__ */
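A short sketch of how the Cortex-A75 helpers declared above could be driven from BL31 C code, assuming AMU accesses are not trapped (see `amu_enable()` later in the patch); this is illustration only, not code from the patch:

    #include <cortex_a75.h>

    /* Sketch only: enable the architected group 0 counters (mask 0x7)
     * on a Cortex-A75 and sample the first one. */
    static uint64_t a75_sample_counter0(void)
    {
    	cortex_a75_amu_write_cpuamcntenset_el0(CORTEX_A75_AMU_GROUP0_MASK);
    	return cortex_a75_amu_cnt_read(0);
    }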
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index 5889904..5e212ec 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -46,12 +46,26 @@
#define CTX_GPREG_SP_EL0 U(0xf8)
#define CTX_GPREGS_END U(0x100)
+#if WORKAROUND_CVE_2017_5715
+#define CTX_CVE_2017_5715_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_CVE_2017_5715_QUAD0 U(0x0)
+#define CTX_CVE_2017_5715_QUAD1 U(0x8)
+#define CTX_CVE_2017_5715_QUAD2 U(0x10)
+#define CTX_CVE_2017_5715_QUAD3 U(0x18)
+#define CTX_CVE_2017_5715_QUAD4 U(0x20)
+#define CTX_CVE_2017_5715_QUAD5 U(0x28)
+#define CTX_CVE_2017_5715_END U(0x30)
+#else
+#define CTX_CVE_2017_5715_OFFSET CTX_GPREGS_OFFSET
+#define CTX_CVE_2017_5715_END CTX_GPREGS_END
+#endif
+
/*******************************************************************************
* Constants that allow assembler code to access members of and the 'el3_state'
* structure at their correct offsets. Note that some of the registers are only
* 32-bits wide but are stored as 64-bit values for convenience
******************************************************************************/
-#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_EL3STATE_OFFSET (CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_END)
#define CTX_SCR_EL3 U(0x0)
#define CTX_RUNTIME_SP U(0x8)
#define CTX_SPSR_EL3 U(0x10)
@@ -186,6 +200,9 @@
/* Constants to determine the size of individual context structures */
#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT)
+#if WORKAROUND_CVE_2017_5715
+#define CTX_CVE_2017_5715_ALL (CTX_CVE_2017_5715_END >> DWORD_SHIFT)
+#endif
#define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT)
#if CTX_INCLUDE_FPREGS
#define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
@@ -201,6 +218,10 @@
*/
DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
+#if WORKAROUND_CVE_2017_5715
+DEFINE_REG_STRUCT(cve_2017_5715_regs, CTX_CVE_2017_5715_ALL);
+#endif
+
/*
* AArch64 EL1 system register context structure for preserving the
* architectural state during switches from one security state to
@@ -242,6 +263,9 @@
*/
typedef struct cpu_context {
gp_regs_t gpregs_ctx;
+#if WORKAROUND_CVE_2017_5715
+ cve_2017_5715_regs_t cve_2017_5715_regs_ctx;
+#endif
el3_state_t el3state_ctx;
el1_sys_regs_t sysregs_ctx;
#if CTX_INCLUDE_FPREGS
diff --git a/include/lib/el3_runtime/pubsub_events.h b/include/lib/el3_runtime/pubsub_events.h
index 9cfedb4..64b3f63 100644
--- a/include/lib/el3_runtime/pubsub_events.h
+++ b/include/lib/el3_runtime/pubsub_events.h
@@ -17,6 +17,13 @@
*/
REGISTER_PUBSUB_EVENT(psci_cpu_on_finish);
+/*
+ * These events are published before/after a CPU has been powered down/up
+ * via the PSCI CPU SUSPEND API.
+ */
+REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_start);
+REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_finish);
+
#ifdef AARCH64
/*
* These events are published by the AArch64 context management framework
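The two new events are consumed with the usual TF-A `SUBSCRIBE_TO_EVENT` pattern; the AMU and Cortex-A75 files later in this patch do exactly this. A minimal sketch with hypothetical handler names:

    #include <pubsub_events.h>

    /* Hypothetical subscriber: save per-CPU state before the power down
     * triggered by PSCI CPU SUSPEND, restore it on the way back up. */
    static void *my_pwrdown_save(const void *arg)
    {
    	/* save state that is lost across power down */
    	return 0;
    }

    static void *my_pwrdown_restore(const void *arg)
    {
    	/* restore the state saved above */
    	return 0;
    }

    SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, my_pwrdown_save);
    SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, my_pwrdown_restore);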
diff --git a/include/lib/extensions/amu.h b/include/lib/extensions/amu.h
index bbefe8f..faa0ee1 100644
--- a/include/lib/extensions/amu.h
+++ b/include/lib/extensions/amu.h
@@ -7,9 +7,39 @@
#ifndef __AMU_H__
#define __AMU_H__
-/* Enable all group 0 counters */
+#include <sys/cdefs.h> /* for CASSERT() */
+#include <cassert.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+/* All group 0 counters */
#define AMU_GROUP0_COUNTERS_MASK 0xf
+#ifdef PLAT_AMU_GROUP1_COUNTERS_MASK
+#define AMU_GROUP1_COUNTERS_MASK PLAT_AMU_GROUP1_COUNTERS_MASK
+#else
+#define AMU_GROUP1_COUNTERS_MASK 0
+#endif
+
+#ifdef PLAT_AMU_GROUP1_NR_COUNTERS
+#define AMU_GROUP1_NR_COUNTERS PLAT_AMU_GROUP1_NR_COUNTERS
+#else
+#define AMU_GROUP1_NR_COUNTERS 0
+#endif
+
+CASSERT(AMU_GROUP1_COUNTERS_MASK <= 0xffff, invalid_amu_group1_counters_mask);
+CASSERT(AMU_GROUP1_NR_COUNTERS <= 16, invalid_amu_group1_nr_counters);
+
+int amu_supported(void);
void amu_enable(int el2_unused);
+/* Group 0 configuration helpers */
+uint64_t amu_group0_cnt_read(int idx);
+void amu_group0_cnt_write(int idx, uint64_t val);
+
+/* Group 1 configuration helpers */
+uint64_t amu_group1_cnt_read(int idx);
+void amu_group1_cnt_write(int idx, uint64_t val);
+void amu_group1_set_evtype(int idx, unsigned int val);
+
#endif /* __AMU_H__ */
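A sketch of the new generic group 1 API, assuming the platform has defined `PLAT_AMU_GROUP1_COUNTERS_MASK` with bit 0 set. The event number used here is a placeholder (group 1 event IDs are implementation defined), not something taken from the patch:

    #include <amu.h>

    /* Sketch only: program group 1 counter 0 with a placeholder event
     * type and read the counter back. */
    static uint64_t sample_group1_counter0(void)
    {
    	amu_group1_set_evtype(0, 0x11);	/* 0x11 is a placeholder event ID */
    	return amu_group1_cnt_read(0);
    }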
diff --git a/include/lib/extensions/amu_private.h b/include/lib/extensions/amu_private.h
new file mode 100644
index 0000000..0c660bb
--- /dev/null
+++ b/include/lib/extensions/amu_private.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __AMU_PRIVATE_H__
+#define __AMU_PRIVATE_H__
+
+#include <stdint.h>
+
+uint64_t amu_group0_cnt_read_internal(int idx);
+void amu_group0_cnt_write_internal(int idx, uint64_t);
+
+uint64_t amu_group1_cnt_read_internal(int idx);
+void amu_group1_cnt_write_internal(int idx, uint64_t);
+void amu_group1_set_evtype_internal(int idx, unsigned int val);
+
+#endif /* __AMU_PRIVATE_H__ */
diff --git a/include/plat/arm/board/common/board_arm_def.h b/include/plat/arm/board/common/board_arm_def.h
index e0c3c86..888629e 100644
--- a/include/plat/arm/board/common/board_arm_def.h
+++ b/include/plat/arm/board/common/board_arm_def.h
@@ -90,7 +90,7 @@
* PLAT_ARM_MAX_BL31_SIZE is calculated using the current BL31 debug size plus a
* little space for growth.
*/
-#define PLAT_ARM_MAX_BL31_SIZE 0x1E000
+#define PLAT_ARM_MAX_BL31_SIZE 0x20000
#ifdef AARCH32
/*
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index a720e98..683be47 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -383,6 +383,11 @@
bl errata_a57_859972_wa
#endif
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ adr x0, workaround_mmu_runtime_exceptions
+ msr vbar_el3, x0
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index b034125..93821b7 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -110,6 +110,12 @@
mov x0, x18
bl errata_a72_859971_wa
#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ adr x0, workaround_mmu_runtime_exceptions
+ msr vbar_el3, x0
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index f642816..c43f07e 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -36,6 +36,11 @@
endfunc cortex_a73_disable_smp
func cortex_a73_reset_func
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ adr x0, workaround_bpiall_vbar0_runtime_exceptions
+ msr vbar_el3, x0
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* Clobbers : x0
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 4cab9e4..e66ad06 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -11,7 +11,120 @@
#include <plat_macros.S>
#include <cortex_a75.h>
+ .globl cortex_a75_amu_cnt_read
+ .globl cortex_a75_amu_cnt_write
+ .globl cortex_a75_amu_read_cpuamcntenset_el0
+ .globl cortex_a75_amu_read_cpuamcntenclr_el0
+ .globl cortex_a75_amu_write_cpuamcntenset_el0
+ .globl cortex_a75_amu_write_cpuamcntenclr_el0
+
+/*
+ * uint64_t cortex_a75_amu_cnt_read(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func cortex_a75_amu_cnt_read
+ adr x1, 1f
+ lsl x0, x0, #3
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, CPUAMEVCNTR0_EL0
+ ret
+ mrs x0, CPUAMEVCNTR1_EL0
+ ret
+ mrs x0, CPUAMEVCNTR2_EL0
+ ret
+ mrs x0, CPUAMEVCNTR3_EL0
+ ret
+ mrs x0, CPUAMEVCNTR4_EL0
+ ret
+endfunc cortex_a75_amu_cnt_read
+
+/*
+ * void cortex_a75_amu_cnt_write(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func cortex_a75_amu_cnt_write
+ adr x2, 1f
+ lsl x0, x0, #3
+ add x2, x2, x0
+ br x2
+
+1:
+	msr	CPUAMEVCNTR0_EL0, x1
+	ret
+	msr	CPUAMEVCNTR1_EL0, x1
+	ret
+	msr	CPUAMEVCNTR2_EL0, x1
+	ret
+	msr	CPUAMEVCNTR3_EL0, x1
+	ret
+	msr	CPUAMEVCNTR4_EL0, x1
+ ret
+endfunc cortex_a75_amu_cnt_write
+
+/*
+ * unsigned int cortex_a75_amu_read_cpuamcntenset_el0(void);
+ *
+ * Read the `CPUAMCNTENSET_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cortex_a75_amu_read_cpuamcntenset_el0
+ mrs x0, CPUAMCNTENSET_EL0
+ ret
+endfunc cortex_a75_amu_read_cpuamcntenset_el0
+
+/*
+ * unsigned int cortex_a75_amu_read_cpuamcntenclr_el0(void);
+ *
+ * Read the `CPUAMCNTENCLR_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cortex_a75_amu_read_cpuamcntenclr_el0
+ mrs x0, CPUAMCNTENCLR_EL0
+ ret
+endfunc cortex_a75_amu_read_cpuamcntenclr_el0
+
+/*
+ * void cortex_a75_amu_write_cpuamcntenset_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENSET_EL0` CPU register.
+ */
+func cortex_a75_amu_write_cpuamcntenset_el0
+ msr CPUAMCNTENSET_EL0, x0
+ ret
+endfunc cortex_a75_amu_write_cpuamcntenset_el0
+
+/*
+ * void cortex_a75_amu_write_cpuamcntenclr_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENCLR_EL0` CPU register.
+ */
+func cortex_a75_amu_write_cpuamcntenclr_el0
+	msr	CPUAMCNTENCLR_EL0, x0
+ ret
+endfunc cortex_a75_amu_write_cpuamcntenclr_el0
+
func cortex_a75_reset_func
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
+ /*
+	 * If the field equals 1, branch targets trained in one context
+	 * cannot affect speculative execution in a different context.
+ */
+ cmp x0, #1
+	b.eq	1f
+
+ adr x0, workaround_bpiall_vbar0_runtime_exceptions
+ msr vbar_el3, x0
+1:
+#endif
+
#if ENABLE_AMU
/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
mrs x0, actlr_el3
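For readers less comfortable with the assembly above, the CSV2 check in `cortex_a75_reset_func` amounts to the following C sketch (illustration only, assuming the usual TF-A system register accessors such as `read_id_aa64pfr0_el1()` and `write_vbar_el3()`; the function name is made up for this example):

    #include <arch.h>
    #include <arch_helpers.h>
    #include <stdint.h>

    extern uint64_t workaround_bpiall_vbar0_runtime_exceptions[];

    static void a75_install_cve_2017_5715_vectors(void)
    {
    	uint64_t csv2 = (read_id_aa64pfr0_el1() >> ID_AA64PFR0_CSV2_SHIFT) &
    			ID_AA64PFR0_CSV2_MASK;

    	/* CSV2 == 1 means branch targets trained in one context cannot
    	 * affect speculation in another, so the workaround is skipped. */
    	if (csv2 != 1ULL)
    		write_vbar_el3((uint64_t)workaround_bpiall_vbar0_runtime_exceptions);
    }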
diff --git a/lib/cpus/aarch64/cortex_a75_pubsub.c b/lib/cpus/aarch64/cortex_a75_pubsub.c
new file mode 100644
index 0000000..c1089a6
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a75_pubsub.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cortex_a75.h>
+#include <platform.h>
+#include <pubsub_events.h>
+
+struct amu_ctx {
+ uint64_t cnts[CORTEX_A75_AMU_NR_COUNTERS];
+ uint16_t mask;
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+
+static void *cortex_a75_context_save(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int midr;
+ unsigned int midr_mask;
+ int i;
+
+ midr = read_midr();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ if ((midr & midr_mask) != (CORTEX_A75_MIDR & midr_mask))
+ return 0;
+
+ /* Save counter configuration */
+ ctx->mask = cortex_a75_amu_read_cpuamcntenset_el0();
+
+ /* Ensure counters are disabled */
+ cortex_a75_amu_write_cpuamcntenclr_el0(ctx->mask);
+ isb();
+
+ /* Save counters */
+ for (i = 0; i < CORTEX_A75_AMU_NR_COUNTERS; i++)
+ ctx->cnts[i] = cortex_a75_amu_cnt_read(i);
+
+ return 0;
+}
+
+static void *cortex_a75_context_restore(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int midr;
+ unsigned int midr_mask;
+ int i;
+
+ midr = read_midr();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ if ((midr & midr_mask) != (CORTEX_A75_MIDR & midr_mask))
+ return 0;
+
+ ctx = &amu_ctxs[plat_my_core_pos()];
+
+ /* Counters were disabled in `cortex_a75_context_save()` */
+ assert(cortex_a75_amu_read_cpuamcntenset_el0() == 0);
+
+ /* Restore counters */
+ for (i = 0; i < CORTEX_A75_AMU_NR_COUNTERS; i++)
+ cortex_a75_amu_cnt_write(i, ctx->cnts[i]);
+ isb();
+
+ /* Restore counter configuration */
+ cortex_a75_amu_write_cpuamcntenset_el0(ctx->mask);
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, cortex_a75_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, cortex_a75_context_restore);
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
new file mode 100644
index 0000000..cd29266
--- /dev/null
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+ .globl workaround_bpiall_vbar0_runtime_exceptions
+
+#define EMIT_BPIALL 0xee070fd5
+#define EMIT_MOV_R0_IMM(v) 0xe3a0000##v
+#define EMIT_SMC 0xe1600070
+
+ .macro enter_workaround _stub_name
+ /* Save GP regs */
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ adr x4, \_stub_name
+
+ /*
+ * Load SPSR_EL3 and VBAR_EL3. SPSR_EL3 is set up to have
+	 * all interrupts masked in preparation for running the workaround
+ * stub in S-EL1. VBAR_EL3 points to the vector table that
+ * will handle the SMC back from the workaround stub.
+ */
+ ldp x0, x1, [x4, #0]
+
+ /*
+ * Load SCTLR_EL1 and ELR_EL3. SCTLR_EL1 is configured to disable
+ * the MMU in S-EL1. ELR_EL3 points to the appropriate stub in S-EL1.
+ */
+ ldp x2, x3, [x4, #16]
+
+ mrs x4, scr_el3
+ mrs x5, spsr_el3
+ mrs x6, elr_el3
+ mrs x7, sctlr_el1
+ mrs x8, esr_el3
+
+ /* Preserve system registers in the workaround context */
+ stp x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
+ stp x6, x7, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
+ stp x8, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+
+ /*
+ * Setting SCR_EL3 to all zeroes means that the NS, RW
+ * and SMD bits are configured as expected.
+ */
+ msr scr_el3, xzr
+
+ /*
+ * Reload system registers with the crafted values
+ * in preparation for entry in S-EL1.
+ */
+ msr spsr_el3, x0
+ msr vbar_el3, x1
+ msr sctlr_el1, x2
+ msr elr_el3, x3
+
+ eret
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used at runtime to enter the workaround at
+ * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions. If the workaround
+ * is not enabled, the existing runtime exception vector table is used.
+ * ---------------------------------------------------------------------
+ */
+vector_base workaround_bpiall_vbar0_runtime_exceptions
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ /*
+ * Since each vector table entry is 128 bytes, we can store the
+ * stub context in the unused space to minimize memory footprint.
+ */
+aarch32_stub_smc:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(1)
+ .word EMIT_SMC
+aarch32_stub_ctx_smc:
+ /* Mask all interrupts and set AArch32 Supervisor mode */
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+
+ /*
+ * VBAR_EL3 points to vbar1 which is the vector table
+ * used while the workaround is executing.
+ */
+ .quad workaround_bpiall_vbar1_runtime_exceptions
+
+ /* Setup SCTLR_EL1 with MMU off and I$ on */
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+
+ /* ELR_EL3 is setup to point to the sync exception stub in AArch32 */
+ .quad aarch32_stub_smc
+ check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
+
+vector_entry workaround_bpiall_vbar0_irq_sp_el0
+ b irq_sp_el0
+aarch32_stub_irq:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(2)
+ .word EMIT_SMC
+aarch32_stub_ctx_irq:
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+ .quad workaround_bpiall_vbar1_runtime_exceptions
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+ .quad aarch32_stub_irq
+ check_vector_size workaround_bpiall_vbar0_irq_sp_el0
+
+vector_entry workaround_bpiall_vbar0_fiq_sp_el0
+ b fiq_sp_el0
+aarch32_stub_fiq:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(4)
+ .word EMIT_SMC
+aarch32_stub_ctx_fiq:
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+ .quad workaround_bpiall_vbar1_runtime_exceptions
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+ .quad aarch32_stub_fiq
+ check_vector_size workaround_bpiall_vbar0_fiq_sp_el0
+
+vector_entry workaround_bpiall_vbar0_serror_sp_el0
+ b serror_sp_el0
+aarch32_stub_serror:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(8)
+ .word EMIT_SMC
+aarch32_stub_ctx_serror:
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+ .quad workaround_bpiall_vbar1_runtime_exceptions
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+ .quad aarch32_stub_serror
+ check_vector_size workaround_bpiall_vbar0_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx
+ b sync_exception_sp_elx
+ check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx
+
+vector_entry workaround_bpiall_vbar0_irq_sp_elx
+ b irq_sp_elx
+ check_vector_size workaround_bpiall_vbar0_irq_sp_elx
+
+vector_entry workaround_bpiall_vbar0_fiq_sp_elx
+ b fiq_sp_elx
+ check_vector_size workaround_bpiall_vbar0_fiq_sp_elx
+
+vector_entry workaround_bpiall_vbar0_serror_sp_elx
+ b serror_sp_elx
+ check_vector_size workaround_bpiall_vbar0_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
+ enter_workaround aarch32_stub_ctx_smc
+ check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64
+
+vector_entry workaround_bpiall_vbar0_irq_aarch64
+ enter_workaround aarch32_stub_ctx_irq
+ check_vector_size workaround_bpiall_vbar0_irq_aarch64
+
+vector_entry workaround_bpiall_vbar0_fiq_aarch64
+ enter_workaround aarch32_stub_ctx_fiq
+ check_vector_size workaround_bpiall_vbar0_fiq_aarch64
+
+vector_entry workaround_bpiall_vbar0_serror_aarch64
+ enter_workaround aarch32_stub_ctx_serror
+ check_vector_size workaround_bpiall_vbar0_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
+ enter_workaround aarch32_stub_ctx_smc
+ check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32
+
+vector_entry workaround_bpiall_vbar0_irq_aarch32
+ enter_workaround aarch32_stub_ctx_irq
+ check_vector_size workaround_bpiall_vbar0_irq_aarch32
+
+vector_entry workaround_bpiall_vbar0_fiq_aarch32
+ enter_workaround aarch32_stub_ctx_fiq
+ check_vector_size workaround_bpiall_vbar0_fiq_aarch32
+
+vector_entry workaround_bpiall_vbar0_serror_aarch32
+ enter_workaround aarch32_stub_ctx_serror
+ check_vector_size workaround_bpiall_vbar0_serror_aarch32
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used while the workaround is executing. It
+ * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
+ * workaround stubs to enter EL3 from S-EL1. It restores the previous
+ * EL3 state before proceeding with the normal runtime exception vector.
+ * ---------------------------------------------------------------------
+ */
+vector_base workaround_bpiall_vbar1_runtime_exceptions
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0
+
+vector_entry workaround_bpiall_vbar1_irq_sp_el0
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_irq_sp_el0
+
+vector_entry workaround_bpiall_vbar1_fiq_sp_el0
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_fiq_sp_el0
+
+vector_entry workaround_bpiall_vbar1_serror_sp_el0
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx
+
+vector_entry workaround_bpiall_vbar1_irq_sp_elx
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_irq_sp_elx
+
+vector_entry workaround_bpiall_vbar1_fiq_sp_elx
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_fiq_sp_elx
+
+vector_entry workaround_bpiall_vbar1_serror_sp_elx
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_aarch64
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64
+
+vector_entry workaround_bpiall_vbar1_irq_aarch64
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_irq_aarch64
+
+vector_entry workaround_bpiall_vbar1_fiq_aarch64
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_fiq_aarch64
+
+vector_entry workaround_bpiall_vbar1_serror_aarch64
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
+ /* Restore register state from the workaround context */
+ ldp x2, x3, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
+ ldp x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
+ ldp x6, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+
+ /* Apply the restored system register state */
+ msr scr_el3, x2
+ msr spsr_el3, x3
+ msr elr_el3, x4
+ msr sctlr_el1, x5
+ msr esr_el3, x6
+
+ /*
+	 * The workaround is complete, so swap VBAR_EL3 back to the
+	 * workaround entry table in preparation for subsequent
+ * Sync/IRQ/FIQ/SError exceptions.
+ */
+ adr x2, workaround_bpiall_vbar0_runtime_exceptions
+ msr vbar_el3, x2
+
+ /*
+ * Restore all GP regs except x0 and x1. The value in x0
+ * indicates the type of the original exception.
+ */
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ /*
+ * Each of these handlers will first restore x0 and x1 from
+	 * the context and then branch to the common implementation for
+ * each of the exception types.
+ */
+ tbnz x0, #1, workaround_bpiall_vbar1_irq
+ tbnz x0, #2, workaround_bpiall_vbar1_fiq
+ tbnz x0, #3, workaround_bpiall_vbar1_serror
+
+ /* Fallthrough case for Sync exception */
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b sync_exception_aarch64
+ check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
+
+vector_entry workaround_bpiall_vbar1_irq_aarch32
+ b report_unhandled_interrupt
+workaround_bpiall_vbar1_irq:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b irq_aarch64
+ check_vector_size workaround_bpiall_vbar1_irq_aarch32
+
+vector_entry workaround_bpiall_vbar1_fiq_aarch32
+ b report_unhandled_interrupt
+workaround_bpiall_vbar1_fiq:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b fiq_aarch64
+ check_vector_size workaround_bpiall_vbar1_fiq_aarch32
+
+vector_entry workaround_bpiall_vbar1_serror_aarch32
+ b report_unhandled_exception
+workaround_bpiall_vbar1_serror:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b serror_aarch64
+ check_vector_size workaround_bpiall_vbar1_serror_aarch32
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
new file mode 100644
index 0000000..f478148
--- /dev/null
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+ .globl workaround_mmu_runtime_exceptions
+
+vector_base workaround_mmu_runtime_exceptions
+
+ .macro apply_workaround
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ mrs x0, sctlr_el3
+ /* Disable MMU */
+ bic x1, x0, #SCTLR_M_BIT
+ msr sctlr_el3, x1
+ isb
+ /* Restore MMU config */
+ msr sctlr_el3, x0
+ isb
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ check_vector_size workaround_mmu_sync_exception_sp_el0
+
+vector_entry workaround_mmu_irq_sp_el0
+ b irq_sp_el0
+ check_vector_size workaround_mmu_irq_sp_el0
+
+vector_entry workaround_mmu_fiq_sp_el0
+ b fiq_sp_el0
+ check_vector_size workaround_mmu_fiq_sp_el0
+
+vector_entry workaround_mmu_serror_sp_el0
+ b serror_sp_el0
+ check_vector_size workaround_mmu_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_sp_elx
+ b sync_exception_sp_elx
+ check_vector_size workaround_mmu_sync_exception_sp_elx
+
+vector_entry workaround_mmu_irq_sp_elx
+ b irq_sp_elx
+ check_vector_size workaround_mmu_irq_sp_elx
+
+vector_entry workaround_mmu_fiq_sp_elx
+ b fiq_sp_elx
+ check_vector_size workaround_mmu_fiq_sp_elx
+
+vector_entry workaround_mmu_serror_sp_elx
+ b serror_sp_elx
+ check_vector_size workaround_mmu_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_aarch64
+ apply_workaround
+ b sync_exception_aarch64
+ check_vector_size workaround_mmu_sync_exception_aarch64
+
+vector_entry workaround_mmu_irq_aarch64
+ apply_workaround
+ b irq_aarch64
+ check_vector_size workaround_mmu_irq_aarch64
+
+vector_entry workaround_mmu_fiq_aarch64
+ apply_workaround
+ b fiq_aarch64
+ check_vector_size workaround_mmu_fiq_aarch64
+
+vector_entry workaround_mmu_serror_aarch64
+ apply_workaround
+ b serror_aarch64
+ check_vector_size workaround_mmu_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_aarch32
+ apply_workaround
+ b sync_exception_aarch32
+ check_vector_size workaround_mmu_sync_exception_aarch32
+
+vector_entry workaround_mmu_irq_aarch32
+ apply_workaround
+ b irq_aarch32
+ check_vector_size workaround_mmu_irq_aarch32
+
+vector_entry workaround_mmu_fiq_aarch32
+ apply_workaround
+ b fiq_aarch32
+ check_vector_size workaround_mmu_fiq_aarch32
+
+vector_entry workaround_mmu_serror_aarch32
+ apply_workaround
+ b serror_aarch32
+ check_vector_size workaround_mmu_serror_aarch32
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 31adfb4..3ba8c1f 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -16,6 +16,8 @@
# It is enabled by default.
A57_DISABLE_NON_TEMPORAL_HINT ?=1
+WORKAROUND_CVE_2017_5715 ?=1
+
# Process SKIP_A57_L1_FLUSH_PWR_DWN flag
$(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
$(eval $(call add_define,SKIP_A57_L1_FLUSH_PWR_DWN))
@@ -28,6 +30,9 @@
$(eval $(call assert_boolean,A57_DISABLE_NON_TEMPORAL_HINT))
$(eval $(call add_define,A57_DISABLE_NON_TEMPORAL_HINT))
+# Process WORKAROUND_CVE_2017_5715 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715))
+$(eval $(call add_define,WORKAROUND_CVE_2017_5715))
# CPU Errata Build flags.
# These should be enabled by the platform if the erratum workaround needs to be
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index d450bd6..effc5bd 100644
--- a/lib/extensions/amu/aarch32/amu.c
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,26 +7,100 @@
#include <amu.h>
#include <arch.h>
#include <arch_helpers.h>
+#include <assert.h>
+#include <platform.h>
+#include <pubsub_events.h>
+
+#define AMU_GROUP0_NR_COUNTERS 4
+
+struct amu_ctx {
+ uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
void amu_enable(int el2_unused)
{
uint64_t features;
features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
- if ((features & ID_PFR0_AMU_MASK) == 1) {
- if (el2_unused) {
- uint64_t v;
+ if ((features & ID_PFR0_AMU_MASK) != 1)
+ return;
- /*
- * Non-secure access from EL0 or EL1 to the Activity Monitor
- * registers do not trap to EL2.
- */
- v = read_hcptr();
- v &= ~TAM_BIT;
- write_hcptr(v);
- }
+ if (el2_unused) {
+ uint64_t v;
- /* Enable group 0 counters */
- write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+ /*
+ * Non-secure access from EL0 or EL1 to the Activity Monitor
+ * registers do not trap to EL2.
+ */
+ v = read_hcptr();
+ v &= ~TAM_BIT;
+ write_hcptr(v);
}
+
+ /* Enable group 0 counters */
+ write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
}
+
+static void *amu_context_save(const void *arg)
+{
+ struct amu_ctx *ctx;
+ uint64_t features;
+
+ features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
+ if ((features & ID_PFR0_AMU_MASK) != 1)
+ return (void *)-1;
+
+ ctx = &amu_ctxs[plat_my_core_pos()];
+
+ /* Assert that group 0 counter configuration is what we expect */
+ assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK);
+
+ /*
+	 * Disable group 0 counters so that other observers, such as the SCP,
+	 * do not sample counter values from the future via the memory-mapped view.
+ */
+ write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
+ isb();
+
+ ctx->group0_cnts[0] = read64_amevcntr00();
+ ctx->group0_cnts[1] = read64_amevcntr01();
+ ctx->group0_cnts[2] = read64_amevcntr02();
+ ctx->group0_cnts[3] = read64_amevcntr03();
+
+ return 0;
+}
+
+static void *amu_context_restore(const void *arg)
+{
+ struct amu_ctx *ctx;
+ uint64_t features;
+
+ features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
+ if ((features & ID_PFR0_AMU_MASK) != 1)
+ return (void *)-1;
+
+ ctx = &amu_ctxs[plat_my_core_pos()];
+
+ /* Counters were disabled in `amu_context_save()` */
+ assert(read_amcntenset0() == 0);
+
+ /* Restore group 0 counters */
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 0))
+ write64_amevcntr00(ctx->group0_cnts[0]);
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 1))
+ write64_amevcntr01(ctx->group0_cnts[1]);
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 2))
+ write64_amevcntr02(ctx->group0_cnts[2]);
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 3))
+ write64_amevcntr03(ctx->group0_cnts[3]);
+ isb();
+
+ /* Enable group 0 counters */
+ write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 007b349..d7645a9 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -1,40 +1,185 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <amu.h>
+#include <amu_private.h>
#include <arch.h>
#include <arch_helpers.h>
+#include <assert.h>
+#include <platform.h>
+#include <pubsub_events.h>
-void amu_enable(int el2_unused)
+#define AMU_GROUP0_NR_COUNTERS 4
+
+struct amu_ctx {
+ uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
+ uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+
+int amu_supported(void)
{
uint64_t features;
features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
- if ((features & ID_AA64PFR0_AMU_MASK) == 1) {
- uint64_t v;
+ return (features & ID_AA64PFR0_AMU_MASK) == 1;
+}
+
+/*
+ * Enable counters. This function is meant to be invoked
+ * by the context management library before exiting from EL3.
+ */
+void amu_enable(int el2_unused)
+{
+ uint64_t v;
- if (el2_unused) {
- /*
- * CPTR_EL2.TAM: Set to zero so any accesses to
- * the Activity Monitor registers do not trap to EL2.
- */
- v = read_cptr_el2();
- v &= ~CPTR_EL2_TAM_BIT;
- write_cptr_el2(v);
- }
+ if (!amu_supported())
+ return;
+ if (el2_unused) {
/*
- * CPTR_EL3.TAM: Set to zero so that any accesses to
- * the Activity Monitor registers do not trap to EL3.
+ * CPTR_EL2.TAM: Set to zero so any accesses to
+ * the Activity Monitor registers do not trap to EL2.
*/
- v = read_cptr_el3();
- v &= ~TAM_BIT;
- write_cptr_el3(v);
-
- /* Enable group 0 counters */
- write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ v = read_cptr_el2();
+ v &= ~CPTR_EL2_TAM_BIT;
+ write_cptr_el2(v);
}
+
+ /*
+ * CPTR_EL3.TAM: Set to zero so that any accesses to
+ * the Activity Monitor registers do not trap to EL3.
+ */
+ v = read_cptr_el3();
+ v &= ~TAM_BIT;
+ write_cptr_el3(v);
+
+ /* Enable group 0 counters */
+ write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ /* Enable group 1 counters */
+ write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+}
+
+/* Read the group 0 counter identified by the given `idx`. */
+uint64_t amu_group0_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+ return amu_group0_cnt_read_internal(idx);
+}
+
+/* Write the group 0 counter identified by the given `idx` with `val`. */
+void amu_group0_cnt_write(int idx, uint64_t val)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+ amu_group0_cnt_write_internal(idx, val);
+ isb();
+}
+
+/* Read the group 1 counter identified by the given `idx`. */
+uint64_t amu_group1_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ return amu_group1_cnt_read_internal(idx);
+}
+
+/* Write the group 1 counter identified by the given `idx` with `val`. */
+void amu_group1_cnt_write(int idx, uint64_t val)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ amu_group1_cnt_write_internal(idx, val);
+ isb();
}
+
+/*
+ * Program the event type register for the given `idx` with
+ * the event number `val`.
+ */
+void amu_group1_set_evtype(int idx, unsigned int val)
+{
+ assert(amu_supported());
+	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ amu_group1_set_evtype_internal(idx, val);
+ isb();
+}
+
+static void *amu_context_save(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ int i;
+
+ if (!amu_supported())
+ return (void *)-1;
+
+ /* Assert that group 0/1 counter configuration is what we expect */
+ assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK &&
+ read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+
+ assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+ <= AMU_GROUP1_NR_COUNTERS);
+
+ /*
+	 * Disable group 0/1 counters so that other observers, such as the SCP,
+	 * do not sample counter values from the future via the memory-mapped view.
+ */
+ write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
+ write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
+ isb();
+
+ /* Save group 0 counters */
+ for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ ctx->group0_cnts[i] = amu_group0_cnt_read(i);
+
+ /* Save group 1 counters */
+ for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
+ ctx->group1_cnts[i] = amu_group1_cnt_read(i);
+
+ return 0;
+}
+
+static void *amu_context_restore(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ int i;
+
+ if (!amu_supported())
+ return (void *)-1;
+
+ /* Counters were disabled in `amu_context_save()` */
+ assert(read_amcntenset0_el0() == 0 && read_amcntenset1_el0() == 0);
+
+ assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+ <= AMU_GROUP1_NR_COUNTERS);
+
+ /* Restore group 0 counters */
+ for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << i))
+ amu_group0_cnt_write(i, ctx->group0_cnts[i]);
+
+ /* Restore group 1 counters */
+ for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
+ if (AMU_GROUP1_COUNTERS_MASK & (1U << i))
+ amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+ isb();
+
+ /* Restore group 0/1 counter configuration */
+ write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S
new file mode 100644
index 0000000..e0b1f56
--- /dev/null
+++ b/lib/extensions/amu/aarch64/amu_helpers.S
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert_macros.S>
+#include <asm_macros.S>
+
+ .globl amu_group0_cnt_read_internal
+ .globl amu_group0_cnt_write_internal
+ .globl amu_group1_cnt_read_internal
+ .globl amu_group1_cnt_write_internal
+ .globl amu_group1_set_evtype_internal
+
+/*
+ * uint64_t amu_group0_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func amu_group0_cnt_read_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x1, x0
+ lsr x1, x1, #2
+ cmp x1, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x1, 1f
+ lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, AMEVCNTR00_EL0 /* index 0 */
+ ret
+ mrs x0, AMEVCNTR01_EL0 /* index 1 */
+ ret
+ mrs x0, AMEVCNTR02_EL0 /* index 2 */
+ ret
+ mrs x0, AMEVCNTR03_EL0 /* index 3 */
+ ret
+endfunc amu_group0_cnt_read_internal
+
+/*
+ * void amu_group0_cnt_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func amu_group0_cnt_write_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x2, x0
+ lsr x2, x2, #2
+ cmp x2, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr AMEVCNTR00_EL0, x1 /* index 0 */
+ ret
+ msr AMEVCNTR01_EL0, x1 /* index 1 */
+ ret
+ msr AMEVCNTR02_EL0, x1 /* index 2 */
+ ret
+ msr AMEVCNTR03_EL0, x1 /* index 3 */
+ ret
+endfunc amu_group0_cnt_write_internal
+
+/*
+ * uint64_t amu_group1_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func amu_group1_cnt_read_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x1, x0
+ lsr x1, x1, #4
+ cmp x1, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x1, 1f
+ lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, AMEVCNTR10_EL0 /* index 0 */
+ ret
+ mrs x0, AMEVCNTR11_EL0 /* index 1 */
+ ret
+ mrs x0, AMEVCNTR12_EL0 /* index 2 */
+ ret
+ mrs x0, AMEVCNTR13_EL0 /* index 3 */
+ ret
+ mrs x0, AMEVCNTR14_EL0 /* index 4 */
+ ret
+ mrs x0, AMEVCNTR15_EL0 /* index 5 */
+ ret
+ mrs x0, AMEVCNTR16_EL0 /* index 6 */
+ ret
+ mrs x0, AMEVCNTR17_EL0 /* index 7 */
+ ret
+ mrs x0, AMEVCNTR18_EL0 /* index 8 */
+ ret
+ mrs x0, AMEVCNTR19_EL0 /* index 9 */
+ ret
+ mrs x0, AMEVCNTR1A_EL0 /* index 10 */
+ ret
+ mrs x0, AMEVCNTR1B_EL0 /* index 11 */
+ ret
+ mrs x0, AMEVCNTR1C_EL0 /* index 12 */
+ ret
+ mrs x0, AMEVCNTR1D_EL0 /* index 13 */
+ ret
+ mrs x0, AMEVCNTR1E_EL0 /* index 14 */
+ ret
+ mrs x0, AMEVCNTR1F_EL0 /* index 15 */
+ ret
+endfunc amu_group1_cnt_read_internal
+
+/*
+ * void amu_group1_cnt_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func amu_group1_cnt_write_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x2, x0
+ lsr x2, x2, #4
+ cmp x2, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr AMEVCNTR10_EL0, x1 /* index 0 */
+ ret
+ msr AMEVCNTR11_EL0, x1 /* index 1 */
+ ret
+ msr AMEVCNTR12_EL0, x1 /* index 2 */
+ ret
+ msr AMEVCNTR13_EL0, x1 /* index 3 */
+ ret
+ msr AMEVCNTR14_EL0, x1 /* index 4 */
+ ret
+ msr AMEVCNTR15_EL0, x1 /* index 5 */
+ ret
+ msr AMEVCNTR16_EL0, x1 /* index 6 */
+ ret
+ msr AMEVCNTR17_EL0, x1 /* index 7 */
+ ret
+ msr AMEVCNTR18_EL0, x1 /* index 8 */
+ ret
+ msr AMEVCNTR19_EL0, x1 /* index 9 */
+ ret
+ msr AMEVCNTR1A_EL0, x1 /* index 10 */
+ ret
+ msr AMEVCNTR1B_EL0, x1 /* index 11 */
+ ret
+ msr AMEVCNTR1C_EL0, x1 /* index 12 */
+ ret
+ msr AMEVCNTR1D_EL0, x1 /* index 13 */
+ ret
+ msr AMEVCNTR1E_EL0, x1 /* index 14 */
+ ret
+ msr AMEVCNTR1F_EL0, x1 /* index 15 */
+ ret
+endfunc amu_group1_cnt_write_internal
+
+/*
+ * void amu_group1_set_evtype_internal(int idx, unsigned int val);
+ *
+ * Program the AMU event type register indexed by `idx`
+ * with the value `val`.
+ */
+func amu_group1_set_evtype_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x2, x0
+ lsr x2, x2, #4
+ cmp x2, #0
+ ASM_ASSERT(eq)
+
+	/* `val` should be in the range [0, 65535] */
+ mov x2, x1
+ lsr x2, x2, #16
+ cmp x2, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of msr/ret instruction pair
+ * in the table below.
+ */
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr AMEVTYPER10_EL0, x1 /* index 0 */
+ ret
+ msr AMEVTYPER11_EL0, x1 /* index 1 */
+ ret
+ msr AMEVTYPER12_EL0, x1 /* index 2 */
+ ret
+ msr AMEVTYPER13_EL0, x1 /* index 3 */
+ ret
+ msr AMEVTYPER14_EL0, x1 /* index 4 */
+ ret
+ msr AMEVTYPER15_EL0, x1 /* index 5 */
+ ret
+ msr AMEVTYPER16_EL0, x1 /* index 6 */
+ ret
+ msr AMEVTYPER17_EL0, x1 /* index 7 */
+ ret
+ msr AMEVTYPER18_EL0, x1 /* index 8 */
+ ret
+ msr AMEVTYPER19_EL0, x1 /* index 9 */
+ ret
+ msr AMEVTYPER1A_EL0, x1 /* index 10 */
+ ret
+ msr AMEVTYPER1B_EL0, x1 /* index 11 */
+ ret
+ msr AMEVTYPER1C_EL0, x1 /* index 12 */
+ ret
+ msr AMEVTYPER1D_EL0, x1 /* index 13 */
+ ret
+ msr AMEVTYPER1E_EL0, x1 /* index 14 */
+ ret
+ msr AMEVTYPER1F_EL0, x1 /* index 15 */
+ ret
+endfunc amu_group1_set_evtype_internal
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index d949067..a77972d 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -14,6 +14,7 @@
#include <debug.h>
#include <platform.h>
#include <pmf.h>
+#include <pubsub_events.h>
#include <runtime_instr.h>
#include <stddef.h>
#include "psci_private.h"
@@ -68,6 +69,8 @@
{
unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);
+ PUBLISH_EVENT(psci_suspend_pwrdown_start);
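+	/*
+	 * Interested drivers can hook this event through the pubsub
+	 * framework. A minimal sketch, assuming the SUBSCRIBE_TO_EVENT
+	 * helper and its callback signature:
+	 *
+	 *   static void *my_pwrdown_hook(const void *arg)
+	 *   {
+	 *           ...save per-CPU context here...
+	 *           return NULL;
+	 *   }
+	 *   SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, my_pwrdown_hook);
+	 */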
+
/* Save PSCI target power level for the suspend finisher handler */
psci_set_suspend_pwrlvl(end_pwrlvl);
@@ -308,6 +311,8 @@
/* Invalidate the suspend level for the cpu */
psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
+ PUBLISH_EVENT(psci_suspend_pwrdown_finish);
+
/*
* Generic management: Now we just need to retrieve the
* information that we had stashed away during the suspend
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index 0acfacb..75c5a91 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -893,7 +893,7 @@
* Check if the mapping function actually managed to map
* anything. If not, just return now.
*/
- if (mm_cursor->base_va >= end_va)
+ if (mm->base_va >= end_va)
return -ENOMEM;
/*
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 7edbd3d..9d3c5f6 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -150,6 +150,10 @@
# Enable Activity Monitor Unit extensions by default
ENABLE_AMU := 1
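+# cortex_a75_pubsub.c is expected to register the Cortex-A75 AMU context
+# save/restore hooks with the PSCI power-down events published from
+# psci_suspend.c.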
+ifeq (${ENABLE_AMU},1)
+BL31_SOURCES += lib/cpus/aarch64/cortex_a75_pubsub.c
+endif
+
ifneq (${ENABLE_STACK_PROTECTOR},0)
PLAT_BL_COMMON_SOURCES += plat/arm/board/fvp/fvp_stack_protector.c
endif
diff --git a/plat/hisilicon/hikey960/hikey960_bl1_setup.c b/plat/hisilicon/hikey960/hikey960_bl1_setup.c
index ae33bd2..11f143a 100644
--- a/plat/hisilicon/hikey960/hikey960_bl1_setup.c
+++ b/plat/hisilicon/hikey960/hikey960_bl1_setup.c
@@ -647,6 +647,8 @@
}
/* GPIO005 - PMU SSI, 10mA */
mmio_write_32(IOCG_006_REG, 2 << 4);
+ /* GPIO213 - PCIE_CLKREQ_N */
+ mmio_write_32(IOMG_AO_033_REG, 1);
}
/*
diff --git a/plat/hisilicon/hikey960/include/hi3660.h b/plat/hisilicon/hikey960/include/hi3660.h
index 83d1b36..ab7b8aa 100644
--- a/plat/hisilicon/hikey960/include/hi3660.h
+++ b/plat/hisilicon/hikey960/include/hi3660.h
@@ -335,6 +335,8 @@
#define IOMG_AO_026_REG (IOMG_AO_REG_BASE + 0x068)
/* GPIO219: PD interrupt. pull up */
#define IOMG_AO_039_REG (IOMG_AO_REG_BASE + 0x09C)
+/* GPIO213: PCIE_CLKREQ_N */
+#define IOMG_AO_033_REG (IOMG_AO_REG_BASE + 0x084)
#define IOCG_AO_REG_BASE 0xFFF1187C
/* GPIO219: PD interrupt. pull up */
diff --git a/plat/socionext/uniphier/platform.mk b/plat/socionext/uniphier/platform.mk
index 41d0444..f99bbf5 100644
--- a/plat/socionext/uniphier/platform.mk
+++ b/plat/socionext/uniphier/platform.mk
@@ -117,4 +117,4 @@
bl1_gzip: $(BUILD_PLAT)/bl1.bin.gzip
%.gzip: %
@echo " GZIP $@"
- $(Q)(cat $< | gzip -n -f -9 > $@) || (rm -f $@ || false)
+ $(Q)gzip -n -f -9 $< --stdout > $@
diff --git a/plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.c b/plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.c
new file mode 100644
index 0000000..bfc19d3
--- /dev/null
+++ b/plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Top-level SMC handler for ZynqMP IPI Mailbox doorbell functions.
+ */
+
+#include <bakery_lock.h>
+#include <debug.h>
+#include <errno.h>
+#include <mmio.h>
+#include <runtime_svc.h>
+#include <string.h>
+#include "ipi_mailbox_svc.h"
+#include "../zynqmp_ipi.h"
+#include "../zynqmp_private.h"
+#include "../../../services/spd/trusty/smcall.h"
+
+/*********************************************************************
+ * Macro definitions
+ ********************************************************************/
+
+/* IPI SMC call macros */
+#define IPI_SMC_OPEN_IRQ_MASK 0x00000001U /* IRQ enable bit in IPI
+ * open SMC call
+ */
+#define IPI_SMC_NOTIFY_BLOCK_MASK 0x00000001U /* Flag to indicate if
+ * IPI notification needs
+ * to be blocking.
+ */
+#define IPI_SMC_ENQUIRY_DIRQ_MASK 0x00000001U /* Flag to indicate if the
+ * notification interrupt is
+ * to be disabled.
+ */
+#define IPI_SMC_ACK_EIRQ_MASK 0x00000001U /* Flag to indicate if the
+ * notification interrupt is
+ * to be enabled.
+ */
+
+#define UNSIGNED32_MASK 0xFFFFFFFFU /* 32bit mask */
+
+/**
+ * ipi_smc_handler() - SMC handler for IPI SMC calls
+ *
+ * @smc_fid - Function identifier
+ * @x1 - x4 - Arguments
+ * @cookie - Unused
+ * @handle - Pointer to caller's context structure
+ *
+ * @return - Unused
+ *
+ * Determines that smc_fid is a valid and supported IPI mailbox SMC Function
+ * ID from the list of ipi_api_id values; otherwise it completes the request
+ * with the unknown SMC Function ID return code (SMC_UNK).
+ *
+ * The SMC calls for the IPI mailbox service are forwarded from the SiP
+ * Service SMC handler function with the rt_svc_handle signature.
+ */
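+/*
+ * Illustrative example (a sketch, not part of the interface contract): an
+ * SMC whose function number maps to IPI_MAILBOX_NOTIFY, with
+ * x1 = IPI_ID_APU (local), x2 = IPI_ID_RPU0 (remote) and
+ * x3 = IPI_SMC_NOTIFY_BLOCK_MASK, results in
+ * ipi_mb_notify(IPI_ID_APU, IPI_ID_RPU0, 1) being called below.
+ */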
+uint64_t ipi_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+ uint64_t x3, uint64_t x4, void *cookie,
+ void *handle, uint64_t flags)
+{
+ int ret;
+ uint32_t ipi_local_id;
+ uint32_t ipi_remote_id;
+ unsigned int is_secure;
+
+ ipi_local_id = x1 & UNSIGNED32_MASK;
+ ipi_remote_id = x2 & UNSIGNED32_MASK;
+
+ if (SMC_ENTITY(smc_fid) >= SMC_ENTITY_TRUSTED_APP)
+ is_secure = 1;
+ else
+ is_secure = 0;
+
+ /* Validate IPI mailbox access */
+ ret = ipi_mb_validate(ipi_local_id, ipi_remote_id, is_secure);
+ if (ret)
+ SMC_RET1(handle, ret);
+
+ switch (SMC_FUNCTION(smc_fid)) {
+ case IPI_MAILBOX_OPEN:
+ ipi_mb_open(ipi_local_id, ipi_remote_id);
+ SMC_RET1(handle, 0);
+ case IPI_MAILBOX_RELEASE:
+ ipi_mb_release(ipi_local_id, ipi_remote_id);
+ SMC_RET1(handle, 0);
+ case IPI_MAILBOX_STATUS_ENQUIRY:
+ {
+ int disable_irq;
+
+ disable_irq = (x3 & IPI_SMC_ENQUIRY_DIRQ_MASK) ? 1 : 0;
+ ret = ipi_mb_enquire_status(ipi_local_id, ipi_remote_id);
+ if ((ret & IPI_MB_STATUS_RECV_PENDING) && disable_irq)
+ ipi_mb_disable_irq(ipi_local_id, ipi_remote_id);
+ SMC_RET1(handle, ret);
+ }
+ case IPI_MAILBOX_NOTIFY:
+ {
+ uint32_t is_blocking;
+
+ is_blocking = (x3 & IPI_SMC_NOTIFY_BLOCK_MASK) ? 1 : 0;
+ ipi_mb_notify(ipi_local_id, ipi_remote_id, is_blocking);
+ SMC_RET1(handle, 0);
+ }
+ case IPI_MAILBOX_ACK:
+ {
+ int enable_irq;
+
+ enable_irq = (x3 & IPI_SMC_ACK_EIRQ_MASK) ? 1 : 0;
+ ipi_mb_ack(ipi_local_id, ipi_remote_id);
+ if (enable_irq)
+ ipi_mb_enable_irq(ipi_local_id, ipi_remote_id);
+ SMC_RET1(handle, 0);
+ }
+ case IPI_MAILBOX_ENABLE_IRQ:
+ ipi_mb_enable_irq(ipi_local_id, ipi_remote_id);
+ SMC_RET1(handle, 0);
+ case IPI_MAILBOX_DISABLE_IRQ:
+ ipi_mb_disable_irq(ipi_local_id, ipi_remote_id);
+ SMC_RET1(handle, 0);
+ default:
+ WARN("Unimplemented IPI service call: 0x%x\n", smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
diff --git a/plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.h b/plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.h
new file mode 100644
index 0000000..387ffd2
--- /dev/null
+++ b/plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* ZynqMP IPI mailbox doorbell service enums and defines */
+
+#ifndef _IPI_MAILBOX_SVC_H_
+#define _IPI_MAILBOX_SVC_H_
+
+#include <stdint.h>
+
+/*********************************************************************
+ * Enum definitions
+ ********************************************************************/
+
+/* IPI SMC function numbers enum definition */
+enum ipi_api_id {
+	/* IPI mailbox operation functions: */
+ IPI_MAILBOX_OPEN = 0x1000,
+ IPI_MAILBOX_RELEASE,
+ IPI_MAILBOX_STATUS_ENQUIRY,
+ IPI_MAILBOX_NOTIFY,
+ IPI_MAILBOX_ACK,
+ IPI_MAILBOX_ENABLE_IRQ,
+ IPI_MAILBOX_DISABLE_IRQ
+};
+
+/*********************************************************************
+ * IPI mailbox service APIs declarations
+ ********************************************************************/
+
+/* IPI SMC handler */
+uint64_t ipi_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+ uint64_t x3, uint64_t x4, void *cookie, void *handle,
+ uint64_t flags);
+
+#endif /* _IPI_MAILBOX_SVC_H_ */
diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk
index cb3b442..bdd194b 100644
--- a/plat/xilinx/zynqmp/platform.mk
+++ b/plat/xilinx/zynqmp/platform.mk
@@ -42,7 +42,8 @@
PLAT_INCLUDES := -Iinclude/plat/arm/common/ \
-Iinclude/plat/arm/common/aarch64/ \
-Iplat/xilinx/zynqmp/include/ \
- -Iplat/xilinx/zynqmp/pm_service/
+ -Iplat/xilinx/zynqmp/pm_service/ \
+ -Iplat/xilinx/zynqmp/ipi_mailbox_service/
PLAT_BL_COMMON_SOURCES := lib/xlat_tables/xlat_tables_common.c \
lib/xlat_tables/aarch64/xlat_tables.c \
@@ -71,7 +72,9 @@
plat/xilinx/zynqmp/plat_startup.c \
plat/xilinx/zynqmp/plat_topology.c \
plat/xilinx/zynqmp/sip_svc_setup.c \
+ plat/xilinx/zynqmp/zynqmp_ipi.c \
plat/xilinx/zynqmp/pm_service/pm_svc_main.c \
plat/xilinx/zynqmp/pm_service/pm_api_sys.c \
plat/xilinx/zynqmp/pm_service/pm_ipi.c \
- plat/xilinx/zynqmp/pm_service/pm_client.c
+ plat/xilinx/zynqmp/pm_service/pm_client.c \
+ plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.c
diff --git a/plat/xilinx/zynqmp/pm_service/pm_api_sys.c b/plat/xilinx/zynqmp/pm_service/pm_api_sys.c
index 90c670d..9e21067 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_api_sys.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_api_sys.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -542,7 +542,6 @@
*/
void pm_get_callbackdata(uint32_t *data, size_t count)
{
-
pm_ipi_buff_read_callb(data, count);
- pm_ipi_irq_clear();
+ pm_ipi_irq_clear(primary_proc);
}
diff --git a/plat/xilinx/zynqmp/pm_service/pm_common.h b/plat/xilinx/zynqmp/pm_service/pm_common.h
index 03351c2..5dcbb0d 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_common.h
+++ b/plat/xilinx/zynqmp/pm_service/pm_common.h
@@ -21,13 +21,13 @@
/**
* pm_ipi - struct for capturing IPI-channel specific info
- * @mask mask for enabling/disabling and triggering the IPI
- * @base base address for IPI
+ * @apu_ipi_id APU IPI agent ID
+ * @pmu_ipi_id PMU IPI agent ID
* @buffer_base base address for payload buffer
*/
struct pm_ipi {
- const unsigned int mask;
- const uintptr_t base;
+ const uint32_t apu_ipi_id;
+ const uint32_t pmu_ipi_id;
const uintptr_t buffer_base;
};
diff --git a/plat/xilinx/zynqmp/pm_service/pm_ipi.c b/plat/xilinx/zynqmp/pm_service/pm_ipi.c
index fdffde7..58faf0e 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_ipi.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_ipi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,28 +8,17 @@
#include <bakery_lock.h>
#include <mmio.h>
#include <platform.h>
+#include "../zynqmp_ipi.h"
#include "../zynqmp_private.h"
#include "pm_ipi.h"
/* IPI message buffers */
#define IPI_BUFFER_BASEADDR 0xFF990000U
-#define IPI_BUFFER_RPU_0_BASE (IPI_BUFFER_BASEADDR + 0x0U)
-#define IPI_BUFFER_RPU_1_BASE (IPI_BUFFER_BASEADDR + 0x200U)
#define IPI_BUFFER_APU_BASE (IPI_BUFFER_BASEADDR + 0x400U)
-#define IPI_BUFFER_PL_0_BASE (IPI_BUFFER_BASEADDR + 0x600U)
-#define IPI_BUFFER_PL_1_BASE (IPI_BUFFER_BASEADDR + 0x800U)
-#define IPI_BUFFER_PL_2_BASE (IPI_BUFFER_BASEADDR + 0xA00U)
-#define IPI_BUFFER_PL_3_BASE (IPI_BUFFER_BASEADDR + 0xC00U)
#define IPI_BUFFER_PMU_BASE (IPI_BUFFER_BASEADDR + 0xE00U)
-#define IPI_BUFFER_TARGET_RPU_0_OFFSET 0x0U
-#define IPI_BUFFER_TARGET_RPU_1_OFFSET 0x40U
#define IPI_BUFFER_TARGET_APU_OFFSET 0x80U
-#define IPI_BUFFER_TARGET_PL_0_OFFSET 0xC0U
-#define IPI_BUFFER_TARGET_PL_1_OFFSET 0x100U
-#define IPI_BUFFER_TARGET_PL_2_OFFSET 0x140U
-#define IPI_BUFFER_TARGET_PL_3_OFFSET 0x180U
#define IPI_BUFFER_TARGET_PMU_OFFSET 0x1C0U
#define IPI_BUFFER_MAX_WORDS 8
@@ -37,76 +26,33 @@
#define IPI_BUFFER_REQ_OFFSET 0x0U
#define IPI_BUFFER_RESP_OFFSET 0x20U
-/* IPI Base Address */
-#define IPI_BASEADDR 0XFF300000
-
-/* APU's IPI registers */
-#define IPI_APU_ISR (IPI_BASEADDR + 0X00000010)
-#define IPI_APU_IER (IPI_BASEADDR + 0X00000018)
-#define IPI_APU_IDR (IPI_BASEADDR + 0X0000001C)
-#define IPI_APU_IXR_PMU_0_MASK (1 << 16)
-
-#define IPI_TRIG_OFFSET 0
-#define IPI_OBS_OFFSET 4
-
-/* Power Management IPI interrupt number */
-#define PM_INT_NUM 0
-#define IPI_PMU_PM_INT_BASE (IPI_PMU_0_TRIG + (PM_INT_NUM * 0x1000))
-#define IPI_PMU_PM_INT_MASK (IPI_APU_IXR_PMU_0_MASK << PM_INT_NUM)
-#if (PM_INT_NUM < 0 || PM_INT_NUM > 3)
- #error PM_INT_NUM value out of range
-#endif
-
-#define IPI_APU_MASK 1U
-
DEFINE_BAKERY_LOCK(pm_secure_lock);
const struct pm_ipi apu_ipi = {
- .mask = IPI_APU_MASK,
- .base = IPI_BASEADDR,
+ .apu_ipi_id = IPI_ID_APU,
+ .pmu_ipi_id = IPI_ID_PMU0,
.buffer_base = IPI_BUFFER_APU_BASE,
};
/**
* pm_ipi_init() - Initialize IPI peripheral for communication with PMU
*
+ * @proc Pointer to the processor that is initiating the request
* @return On success, the initialization function must return 0.
* Any other return value will cause the framework to ignore
* the service
*
* Called from pm_setup initialization function
*/
-int pm_ipi_init(void)
+int pm_ipi_init(const struct pm_proc *proc)
{
bakery_lock_init(&pm_secure_lock);
-
- /* IPI Interrupts Clear & Disable */
- mmio_write_32(IPI_APU_ISR, 0xffffffff);
- mmio_write_32(IPI_APU_IDR, 0xffffffff);
+ ipi_mb_open(proc->ipi->apu_ipi_id, proc->ipi->pmu_ipi_id);
return 0;
}
/**
- * pm_ipi_wait() - wait for pmu to handle request
- * @proc proc which is waiting for PMU to handle request
- */
-static enum pm_ret_status pm_ipi_wait(const struct pm_proc *proc)
-{
- int status;
-
- /* Wait until previous interrupt is handled by PMU */
- do {
- status = mmio_read_32(proc->ipi->base + IPI_OBS_OFFSET) &
- IPI_PMU_PM_INT_MASK;
- /* TODO: 1) Use timer to add delay between read attempts */
- /* TODO: 2) Return PM_RET_ERR_TIMEOUT if this times out */
- } while (status);
-
- return PM_RET_SUCCESS;
-}
-
-/**
* pm_ipi_send_common() - Sends IPI request to the PMU
* @proc Pointer to the processor who is initiating request
* @payload API id and call arguments to be written in IPI buffer
@@ -124,16 +70,13 @@
IPI_BUFFER_TARGET_PMU_OFFSET +
IPI_BUFFER_REQ_OFFSET;
- /* Wait until previous interrupt is handled by PMU */
- pm_ipi_wait(proc);
-
/* Write payload into IPI buffer */
for (size_t i = 0; i < PAYLOAD_ARG_CNT; i++) {
mmio_write_32(buffer_base + offset, payload[i]);
offset += PAYLOAD_ARG_SIZE;
}
/* Generate IPI to PMU */
- mmio_write_32(proc->ipi->base + IPI_TRIG_OFFSET, IPI_PMU_PM_INT_MASK);
+ ipi_mb_notify(proc->ipi->apu_ipi_id, proc->ipi->pmu_ipi_id, 1);
return PM_RET_SUCCESS;
}
@@ -178,8 +121,6 @@
IPI_BUFFER_TARGET_PMU_OFFSET +
IPI_BUFFER_RESP_OFFSET;
- pm_ipi_wait(proc);
-
/*
* Read response from IPI buffer
* buf-0: success or error+reason
@@ -250,17 +191,12 @@
return ret;
}
-void pm_ipi_irq_enable(void)
-{
- mmio_write_32(IPI_APU_IER, IPI_APU_IXR_PMU_0_MASK);
-}
-
-void pm_ipi_irq_disable(void)
+void pm_ipi_irq_enable(const struct pm_proc *proc)
{
- mmio_write_32(IPI_APU_IDR, IPI_APU_IXR_PMU_0_MASK);
+ ipi_mb_enable_irq(proc->ipi->apu_ipi_id, proc->ipi->pmu_ipi_id);
}
-void pm_ipi_irq_clear(void)
+void pm_ipi_irq_clear(const struct pm_proc *proc)
{
- mmio_write_32(IPI_APU_ISR, IPI_APU_IXR_PMU_0_MASK);
+ ipi_mb_ack(proc->ipi->apu_ipi_id, proc->ipi->pmu_ipi_id);
}
diff --git a/plat/xilinx/zynqmp/pm_service/pm_ipi.h b/plat/xilinx/zynqmp/pm_service/pm_ipi.h
index a76298b..e6b36f5 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_ipi.h
+++ b/plat/xilinx/zynqmp/pm_service/pm_ipi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,7 +9,7 @@
#include "pm_common.h"
-int pm_ipi_init(void);
+int pm_ipi_init(const struct pm_proc *proc);
enum pm_ret_status pm_ipi_send(const struct pm_proc *proc,
uint32_t payload[PAYLOAD_ARG_CNT]);
@@ -17,8 +17,7 @@
uint32_t payload[PAYLOAD_ARG_CNT],
unsigned int *value, size_t count);
void pm_ipi_buff_read_callb(unsigned int *value, size_t count);
-void pm_ipi_irq_enable(void);
-void pm_ipi_irq_disable(void);
-void pm_ipi_irq_clear(void);
+void pm_ipi_irq_enable(const struct pm_proc *proc);
+void pm_ipi_irq_clear(const struct pm_proc *proc);
#endif /* _PM_IPI_H_ */
diff --git a/plat/xilinx/zynqmp/pm_service/pm_svc_main.c b/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
index f4e679b..fb64bc5 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -49,22 +49,25 @@
*/
int pm_setup(void)
{
- int status;
+ int status, ret;
if (!zynqmp_is_pmu_up())
return -ENODEV;
- status = pm_ipi_init();
+ status = pm_ipi_init(primary_proc);
- if (status == 0)
+ if (status >= 0) {
INFO("BL31: PM Service Init Complete: API v%d.%d\n",
PM_VERSION_MAJOR, PM_VERSION_MINOR);
- else
+ ret = 0;
+ } else {
INFO("BL31: PM Service Init Failed, Error Code %d!\n", status);
+ ret = status;
+ }
pm_down = status;
- return status;
+ return ret;
}
/**
@@ -163,7 +166,7 @@
* Even if we were wrong, it would not enable the IRQ in
* the GIC.
*/
- pm_ipi_irq_enable();
+ pm_ipi_irq_enable(primary_proc);
SMC_RET1(handle, (uint64_t)ret |
((uint64_t)pm_ctx.api_version << 32));
diff --git a/plat/xilinx/zynqmp/sip_svc_setup.c b/plat/xilinx/zynqmp/sip_svc_setup.c
index ae6ecaf..8b44eaa 100644
--- a/plat/xilinx/zynqmp/sip_svc_setup.c
+++ b/plat/xilinx/zynqmp/sip_svc_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,7 +8,9 @@
#include <runtime_svc.h>
#include <uuid.h>
+#include "ipi_mailbox_svc.h"
#include "pm_svc_main.h"
+#include "zynqmp_ipi.h"
/* SMC function IDs for SiP Service queries */
#define ZYNQMP_SIP_SVC_CALL_COUNT 0x8200ff00
@@ -19,10 +21,12 @@
#define SIP_SVC_VERSION_MAJOR 0
#define SIP_SVC_VERSION_MINOR 1
-/* These macros are used to identify PM calls from the SMC function ID */
+/* These macros are used to identify PM, IPI calls from the SMC function ID */
#define PM_FID_MASK 0xf000u
#define PM_FID_VALUE 0u
+#define IPI_FID_VALUE 0x1000u
#define is_pm_fid(_fid) (((_fid) & PM_FID_MASK) == PM_FID_VALUE)
+#define is_ipi_fid(_fid) (((_fid) & PM_FID_MASK) == IPI_FID_VALUE)
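+/* e.g. an FID with bits [15:12] equal to 0x1 is routed to ipi_smc_handler() below */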
/* SiP Service UUID */
DEFINE_SVC_UUID(zynqmp_sip_uuid,
@@ -63,6 +67,12 @@
flags);
}
+ /* Let IPI SMC handler deal with IPI-related requests */
+ if (is_ipi_fid(smc_fid)) {
+ return ipi_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
+ flags);
+ }
+
switch (smc_fid) {
case ZYNQMP_SIP_SVC_CALL_COUNT:
/* PM functions + default functions */
diff --git a/plat/xilinx/zynqmp/zynqmp_ipi.c b/plat/xilinx/zynqmp/zynqmp_ipi.c
new file mode 100644
index 0000000..755a3b7
--- /dev/null
+++ b/plat/xilinx/zynqmp/zynqmp_ipi.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Zynq UltraScale+ MPSoC IPI agent register access management
+ */
+
+#include <bakery_lock.h>
+#include <debug.h>
+#include <errno.h>
+#include <mmio.h>
+#include <runtime_svc.h>
+#include <string.h>
+#include "zynqmp_ipi.h"
+#include "../zynqmp_private.h"
+
+/*********************************************************************
+ * Macro definitions
+ ********************************************************************/
+
+/* IPI registers base address */
+#define IPI_REGS_BASE 0xFF300000U
+
+/* IPI registers offsets macros */
+#define IPI_TRIG_OFFSET 0x00U
+#define IPI_OBR_OFFSET 0x04U
+#define IPI_ISR_OFFSET 0x10U
+#define IPI_IMR_OFFSET 0x14U
+#define IPI_IER_OFFSET 0x18U
+#define IPI_IDR_OFFSET 0x1CU
+
+/* IPI agent register base address */
+#define IPI_REG_BASE(I) (zynqmp_ipi_table[(I)].ipi_reg_base)
+
+/* IPI register bit mask */
+#define IPI_BIT_MASK(I) (zynqmp_ipi_table[(I)].ipi_bit_mask)
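+/*
+ * For example, with the table below, IPI_REG_BASE(IPI_ID_PMU0) evaluates to
+ * 0xFF330000 and IPI_BIT_MASK(IPI_ID_APU) to 0x1.
+ */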
+
+/* IPI secure check */
+#define IPI_SECURE_MASK 0x1U
+#define IPI_IS_SECURE(I) ((zynqmp_ipi_table[(I)].secure_only & \
+ IPI_SECURE_MASK) ? 1 : 0)
+
+/*********************************************************************
+ * Struct definitions
+ ********************************************************************/
+
+/* structure to maintain IPI configuration information */
+struct zynqmp_ipi_config {
+ unsigned int ipi_bit_mask;
+ unsigned int ipi_reg_base;
+ unsigned char secure_only;
+};
+
+/* ZynqMP IPI configuration table */
+static const struct zynqmp_ipi_config zynqmp_ipi_table[] = {
+ /* APU IPI */
+ {
+ .ipi_bit_mask = 0x1,
+ .ipi_reg_base = 0xFF300000,
+ .secure_only = 0,
+ },
+ /* RPU0 IPI */
+ {
+ .ipi_bit_mask = 0x100,
+ .ipi_reg_base = 0xFF310000,
+ .secure_only = 0,
+ },
+ /* RPU1 IPI */
+ {
+ .ipi_bit_mask = 0x200,
+ .ipi_reg_base = 0xFF320000,
+ .secure_only = 0,
+ },
+ /* PMU0 IPI */
+ {
+ .ipi_bit_mask = 0x10000,
+ .ipi_reg_base = 0xFF330000,
+ .secure_only = IPI_SECURE_MASK,
+ },
+ /* PMU1 IPI */
+ {
+ .ipi_bit_mask = 0x20000,
+ .ipi_reg_base = 0xFF331000,
+ .secure_only = IPI_SECURE_MASK,
+ },
+ /* PMU2 IPI */
+ {
+ .ipi_bit_mask = 0x40000,
+ .ipi_reg_base = 0xFF332000,
+ .secure_only = IPI_SECURE_MASK,
+ },
+ /* PMU3 IPI */
+ {
+ .ipi_bit_mask = 0x80000,
+ .ipi_reg_base = 0xFF333000,
+ .secure_only = IPI_SECURE_MASK,
+ },
+ /* PL0 IPI */
+ {
+ .ipi_bit_mask = 0x1000000,
+ .ipi_reg_base = 0xFF340000,
+ .secure_only = 0,
+ },
+ /* PL1 IPI */
+ {
+ .ipi_bit_mask = 0x2000000,
+ .ipi_reg_base = 0xFF350000,
+ .secure_only = 0,
+ },
+ /* PL2 IPI */
+ {
+ .ipi_bit_mask = 0x4000000,
+ .ipi_reg_base = 0xFF360000,
+ .secure_only = 0,
+ },
+ /* PL3 IPI */
+ {
+ .ipi_bit_mask = 0x8000000,
+ .ipi_reg_base = 0xFF370000,
+ .secure_only = 0,
+ },
+};
+
+/* is_ipi_mb_within_range() - verify if IPI mailbox is within range
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ * return - 1 if within range, 0 if not
+ */
+static inline int is_ipi_mb_within_range(uint32_t local, uint32_t remote)
+{
+ int ret = 1;
+ uint32_t ipi_total = ARRAY_SIZE(zynqmp_ipi_table);
+
+ if (remote >= ipi_total || local >= ipi_total)
+ ret = 0;
+
+ return ret;
+}
+
+/**
+ * ipi_mb_validate() - validate IPI mailbox access
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ * @is_secure - indicate if the requester is from secure software
+ *
+ * return - 0 success, negative value for errors
+ */
+int ipi_mb_validate(uint32_t local, uint32_t remote, unsigned int is_secure)
+{
+ int ret = 0;
+
+ if (!is_ipi_mb_within_range(local, remote))
+ ret = -EINVAL;
+ else if (IPI_IS_SECURE(local) && !is_secure)
+ ret = -EPERM;
+ else if (IPI_IS_SECURE(remote) && !is_secure)
+ ret = -EPERM;
+
+ return ret;
+}
+
+/**
+ * ipi_mb_open() - Open IPI mailbox.
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ */
+void ipi_mb_open(uint32_t local, uint32_t remote)
+{
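+	/*
+	 * Mask the notification interrupt from the remote agent and clear
+	 * any stale pending request before the mailbox is used.
+	 */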
+ mmio_write_32(IPI_REG_BASE(local) + IPI_IDR_OFFSET,
+ IPI_BIT_MASK(remote));
+ mmio_write_32(IPI_REG_BASE(local) + IPI_ISR_OFFSET,
+ IPI_BIT_MASK(remote));
+}
+
+/**
+ * ipi_mb_release() - Release IPI mailbox.
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ */
+void ipi_mb_release(uint32_t local, uint32_t remote)
+{
+ mmio_write_32(IPI_REG_BASE(local) + IPI_IDR_OFFSET,
+ IPI_BIT_MASK(remote));
+}
+
+/**
+ * ipi_mb_enquire_status() - Enquire IPI mailbox status
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ * return - 0 idle, positive value for pending sending or receiving,
+ * negative value for errors
+ */
+int ipi_mb_enquire_status(uint32_t local, uint32_t remote)
+{
+ int ret = 0;
+ uint32_t status;
+
+ status = mmio_read_32(IPI_REG_BASE(local) + IPI_OBR_OFFSET);
+ if (status & IPI_BIT_MASK(remote))
+ ret |= IPI_MB_STATUS_SEND_PENDING;
+ status = mmio_read_32(IPI_REG_BASE(local) + IPI_ISR_OFFSET);
+ if (status & IPI_BIT_MASK(remote))
+ ret |= IPI_MB_STATUS_RECV_PENDING;
+
+ return ret;
+}
+
+/* ipi_mb_notify() - Trigger IPI mailbox notification
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ * @is_blocking - whether to trigger the notification in blocking mode.
+ *
+ * It sets the remote bit in the IPI agent trigger register.
+ *
+ */
+void ipi_mb_notify(uint32_t local, uint32_t remote, uint32_t is_blocking)
+{
+ uint32_t status;
+
+ mmio_write_32(IPI_REG_BASE(local) + IPI_TRIG_OFFSET,
+ IPI_BIT_MASK(remote));
+ if (is_blocking) {
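+		/*
+		 * Poll the observation register until the remote agent
+		 * has handled the doorbell.
+		 */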
+ do {
+ status = mmio_read_32(IPI_REG_BASE(local) +
+ IPI_OBR_OFFSET);
+ } while (status & IPI_BIT_MASK(remote));
+ }
+}
+
+/* ipi_mb_ack() - Ack IPI mailbox notification from the other end
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ * It will clear the remote bit in the ISR register.
+ *
+ */
+void ipi_mb_ack(uint32_t local, uint32_t remote)
+{
+ mmio_write_32(IPI_REG_BASE(local) + IPI_ISR_OFFSET,
+ IPI_BIT_MASK(remote));
+}
+
+/* ipi_mb_disable_irq() - Disable IPI mailbox notification interrupt
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ * It will mask the remote bit in the IDR register.
+ *
+ */
+void ipi_mb_disable_irq(uint32_t local, uint32_t remote)
+{
+ mmio_write_32(IPI_REG_BASE(local) + IPI_IDR_OFFSET,
+ IPI_BIT_MASK(remote));
+}
+
+/* ipi_mb_enable_irq() - Enable IPI mailbox notification interrupt
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ * It will set the remote bit in the IER register.
+ *
+ */
+void ipi_mb_enable_irq(uint32_t local, uint32_t remote)
+{
+ mmio_write_32(IPI_REG_BASE(local) + IPI_IER_OFFSET,
+ IPI_BIT_MASK(remote));
+}
diff --git a/plat/xilinx/zynqmp/zynqmp_ipi.h b/plat/xilinx/zynqmp/zynqmp_ipi.h
new file mode 100644
index 0000000..0544ddb
--- /dev/null
+++ b/plat/xilinx/zynqmp/zynqmp_ipi.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* ZynqMP IPI management enums and defines */
+
+#ifndef _ZYNQMP_IPI_H_
+#define _ZYNQMP_IPI_H_
+
+#include <stdint.h>
+
+/*********************************************************************
+ * IPI agent IDs macros
+ ********************************************************************/
+#define IPI_ID_APU 0U
+#define IPI_ID_RPU0 1U
+#define IPI_ID_RPU1 2U
+#define IPI_ID_PMU0 3U
+#define IPI_ID_PMU1 4U
+#define IPI_ID_PMU2 5U
+#define IPI_ID_PMU3 6U
+#define IPI_ID_PL0 7U
+#define IPI_ID_PL1 8U
+#define IPI_ID_PL2 9U
+#define IPI_ID_PL3 10U
+
+/*********************************************************************
+ * IPI mailbox status macros
+ ********************************************************************/
+#define IPI_MB_STATUS_IDLE 0
+#define IPI_MB_STATUS_SEND_PENDING 1
+#define IPI_MB_STATUS_RECV_PENDING 2
+
+/*********************************************************************
+ * IPI mailbox call is secure or not macros
+ ********************************************************************/
+#define IPI_MB_CALL_NOTSECURE 0
+#define IPI_MB_CALL_SECURE 1
+
+/*********************************************************************
+ * IPI APIs declarations
+ ********************************************************************/
+
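+/*
+ * Typical sequence (sketch only; ipi_mailbox_svc.c drives these calls from
+ * individual SMC requests rather than as one block):
+ *
+ *   if (ipi_mb_validate(local, remote, is_secure) == 0) {
+ *           ipi_mb_open(local, remote);
+ *           ipi_mb_notify(local, remote, 1);
+ *           ...
+ *           ipi_mb_ack(local, remote);
+ *           ipi_mb_release(local, remote);
+ *   }
+ */
+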
+/* Validate IPI mailbox access */
+int ipi_mb_validate(uint32_t local, uint32_t remote, unsigned int is_secure);
+
+/* Open the IPI mailbox */
+void ipi_mb_open(uint32_t local, uint32_t remote);
+
+/* Release the IPI mailbox */
+void ipi_mb_release(uint32_t local, uint32_t remote);
+
+/* Enquire IPI mailbox status */
+int ipi_mb_enquire_status(uint32_t local, uint32_t remote);
+
+/* Trigger notification on the IPI mailbox */
+void ipi_mb_notify(uint32_t local, uint32_t remote, uint32_t is_blocking);
+
+/* Ack IPI mailbox notification */
+void ipi_mb_ack(uint32_t local, uint32_t remote);
+
+/* Disable IPI mailbox notification interrupt */
+void ipi_mb_disable_irq(uint32_t local, uint32_t remote);
+
+/* Enable IPI mailbox notification interrupt */
+void ipi_mb_enable_irq(uint32_t local, uint32_t remote);
+
+#endif /* _ZYNQMP_IPI_H_ */
diff --git a/services/spd/tlkd/tlkd_main.c b/services/spd/tlkd/tlkd_main.c
index 78e9853..cb68bff 100644
--- a/services/spd/tlkd/tlkd_main.c
+++ b/services/spd/tlkd/tlkd_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -193,12 +193,14 @@
* b. register shared memory with the SP for passing args
* required for maintaining sessions with the Trusted
* Applications.
- * c. open/close sessions
- * d. issue commands to the Trusted Apps
- * e. resume the preempted yielding SMC call.
+ * c. register non-secure world's memory map with the OS
+ * d. open/close sessions
+ * e. issue commands to the Trusted Apps
+ * f. resume the preempted yielding SMC call.
*/
case TLK_REGISTER_LOGBUF:
case TLK_REGISTER_REQBUF:
+ case TLK_REGISTER_NS_DRAM:
case TLK_OPEN_TA_SESSION:
case TLK_CLOSE_TA_SESSION:
case TLK_TA_LAUNCH_OP: