Merge pull request #1245 from antonio-nino-diaz-arm/an/checkpatch
Analyze coding style of patches individually
diff --git a/Makefile b/Makefile
index 6eec581..ee22752 100644
--- a/Makefile
+++ b/Makefile
@@ -127,6 +127,12 @@
PP := ${CROSS_COMPILE}gcc -E
DTC ?= dtc
+# Use ${LD}.bfd instead if it exists (either as an absolute path or via $PATH).
+ifneq ($(strip $(wildcard ${LD}.bfd) \
+ $(foreach dir,$(subst :, ,${PATH}),$(wildcard ${dir}/${LD}.bfd))),)
+LD := ${LD}.bfd
+endif
+
ifeq (${ARM_ARCH_MAJOR},7)
target32-directive = -target arm-none-eabi
# Will set march32-directive from platform configuration
@@ -163,7 +169,7 @@
GCC_V_OUTPUT := $(shell $(CC) -v 2>&1)
PIE_FOUND := $(findstring --enable-default-pie,${GCC_V_OUTPUT})
-ifeq ($(PIE_FOUND),1)
+ifneq ($(PIE_FOUND),)
TF_CFLAGS += -fno-PIE
endif
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 9b7735f..57c065c 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -153,7 +153,14 @@
.endm
- .macro save_x18_to_x29_sp_el0
+ .macro save_x4_to_x29_sp_el0
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
@@ -297,34 +304,16 @@
/* Check whether aarch32 issued an SMC64 */
tbnz x0, #FUNCID_CC_SHIFT, smc_prohibited
- /*
- * Since we're are coming from aarch32, x8-x18 need to be saved as per
- * SMC32 calling convention. If a lower EL in aarch64 is making an
- * SMC32 call then it must have saved x8-x17 already therein.
- */
- stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
- stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
- stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
- stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
- stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-
- /* x4-x7, x18, sp_el0 are saved below */
-
smc_handler64:
/*
* Populate the parameters for the SMC handler.
* We already have x0-x4 in place. x5 will point to a cookie (not used
* now). x6 will point to the context structure (SP_EL3) and x7 will
- * contain flags we need to pass to the handler Hence save x5-x7.
+ * contain flags we need to pass to the handler.
*
- * Note: x4 only needs to be preserved for AArch32 callers but we do it
- * for AArch64 callers as well for convenience
+ * Save x4-x29 and sp_el0. Refer to SMCCC v1.1.
*/
- stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
- stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-
- /* Save rest of the gpregs and sp_el0*/
- save_x18_to_x29_sp_el0
+ save_x4_to_x29_sp_el0
mov x5, xzr
mov x6, sp
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 2db4856..886d301 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -23,6 +23,7 @@
bl31/bl31_context_mgmt.c \
common/runtime_svc.c \
plat/common/aarch64/platform_mp_stack.S \
+ services/arm_arch_svc/arm_arch_svc_setup.c \
services/std_svc/std_svc_setup.c \
${PSCI_LIB_SOURCES} \
${SPM_SOURCES} \
diff --git a/docs/firmware-design.rst b/docs/firmware-design.rst
index 1f8fcc8..c383c5d 100644
--- a/docs/firmware-design.rst
+++ b/docs/firmware-design.rst
@@ -2574,9 +2574,9 @@
``ARM_ARCH_MINOR`` >= 2.
- The Common not Private (CnP) bit is enabled to indicate that multiple
- Page Entries in the same Inner Shareable domain use the same translation
- table entries for a given stage of translation for a particular translation
- regime.
+ Processing Elements in the same Inner Shareable domain use the same
+ translation table entries for a given stage of translation for a particular
+ translation regime.
ARMv7
~~~~~
diff --git a/docs/plat/hikey.rst b/docs/plat/hikey.rst
index 1c48104..99259f3 100644
--- a/docs/plat/hikey.rst
+++ b/docs/plat/hikey.rst
@@ -65,7 +65,7 @@
BUILDFLAGS=-DSERIAL_BASE=0xF8015000
- If your hikey hardware is built by LeMarker, nothing to do.
+ If your hikey hardware is built by LeMaker, nothing to do.
- Build it in debug mode. Create your own build script file or refer to **build\_uefi.sh** in the l-loader git repository.
diff --git a/drivers/arm/gic/v3/arm_gicv3_common.c b/drivers/arm/gic/v3/arm_gicv3_common.c
index 8d552ca..c809732 100644
--- a/drivers/arm/gic/v3/arm_gicv3_common.c
+++ b/drivers/arm/gic/v3/arm_gicv3_common.c
@@ -84,6 +84,15 @@
assert(gicr_base);
/*
+ * If the GIC had power removed, the GICR_WAKER state will be reset.
+ * Since the GICR_WAKER.Sleep and GICR_WAKER.Quiescent bits are cleared,
+ * we can exit early. This also prevents the following assert from
+ * erroneously triggering.
+ */
+ if (!(gicr_read_waker(gicr_base) & WAKER_SL_BIT))
+ return;
+
+ /*
* Writes to GICR_WAKER.Sleep bit are ignored if GICR_WAKER.Quiescent
* bit is not set. We should be alright on power on path, therefore
* coming out of sleep and Quiescent should be set, but we assert in
diff --git a/drivers/delay_timer/delay_timer.c b/drivers/delay_timer/delay_timer.c
index 43f5af7..c9f84d7 100644
--- a/drivers/delay_timer/delay_timer.c
+++ b/drivers/delay_timer/delay_timer.c
@@ -7,6 +7,7 @@
#include <assert.h>
#include <delay_timer.h>
#include <platform_def.h>
+#include <utils_def.h>
/***********************************************************
* The delay timer implementation
@@ -30,7 +31,8 @@
start = ops->get_timer_value();
- total_delta = (usec * ops->clk_div) / ops->clk_mult;
+ /* Add an extra tick to avoid delaying less than requested. */
+ total_delta = div_round_up(usec * ops->clk_div, ops->clk_mult) + 1;
do {
/*
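Why round up and then add a tick: a minimal standalone check, using hypothetical clk_div/clk_mult values (the driver's real values are platform-specific):

    #include <assert.h>

    /* Same arithmetic as the new div_round_up() in utils_def.h (the
     * statement-expression wrapper is dropped for this standalone demo). */
    #define div_round_up(val, div) (((val) + (div) - 1) / (div))

    int main(void)
    {
    	/* Hypothetical timer: clk_div = 3, clk_mult = 8, i.e. 3 ticks per
    	 * 8 us, and a requested delay of 10 us. */
    	unsigned int usec = 10, clk_div = 3, clk_mult = 8;

    	/* Old code truncated: 30 / 8 = 3 ticks, roughly 8 us, an under-delay. */
    	assert((usec * clk_div) / clk_mult == 3);

    	/* New code: ceil(30 / 8) + 1 = 5 ticks. Waiting for 5 tick edges
    	 * guarantees at least 4 full tick periods (~10.7 us >= 10 us),
    	 * since the first edge may land just after start is sampled. */
    	assert(div_round_up(usec * clk_div, clk_mult) + 1 == 5);
    	return 0;
    }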
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index 5e212ec..5f6bdc9 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -46,26 +46,12 @@
#define CTX_GPREG_SP_EL0 U(0xf8)
#define CTX_GPREGS_END U(0x100)
-#if WORKAROUND_CVE_2017_5715
-#define CTX_CVE_2017_5715_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
-#define CTX_CVE_2017_5715_QUAD0 U(0x0)
-#define CTX_CVE_2017_5715_QUAD1 U(0x8)
-#define CTX_CVE_2017_5715_QUAD2 U(0x10)
-#define CTX_CVE_2017_5715_QUAD3 U(0x18)
-#define CTX_CVE_2017_5715_QUAD4 U(0x20)
-#define CTX_CVE_2017_5715_QUAD5 U(0x28)
-#define CTX_CVE_2017_5715_END U(0x30)
-#else
-#define CTX_CVE_2017_5715_OFFSET CTX_GPREGS_OFFSET
-#define CTX_CVE_2017_5715_END CTX_GPREGS_END
-#endif
-
/*******************************************************************************
* Constants that allow assembler code to access members of and the 'el3_state'
* structure at their correct offsets. Note that some of the registers are only
* 32-bits wide but are stored as 64-bit values for convenience
******************************************************************************/
-#define CTX_EL3STATE_OFFSET (CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_END)
+#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
#define CTX_SCR_EL3 U(0x0)
#define CTX_RUNTIME_SP U(0x8)
#define CTX_SPSR_EL3 U(0x10)
@@ -200,9 +186,6 @@
/* Constants to determine the size of individual context structures */
#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT)
-#if WORKAROUND_CVE_2017_5715
-#define CTX_CVE_2017_5715_ALL (CTX_CVE_2017_5715_END >> DWORD_SHIFT)
-#endif
#define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT)
#if CTX_INCLUDE_FPREGS
#define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
@@ -218,10 +201,6 @@
*/
DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
-#if WORKAROUND_CVE_2017_5715
-DEFINE_REG_STRUCT(cve_2017_5715_regs, CTX_CVE_2017_5715_ALL);
-#endif
-
/*
* AArch64 EL1 system register context structure for preserving the
* architectural state during switches from one security state to
@@ -263,9 +242,6 @@
*/
typedef struct cpu_context {
gp_regs_t gpregs_ctx;
-#if WORKAROUND_CVE_2017_5715
- cve_2017_5715_regs_t cve_2017_5715_regs_ctx;
-#endif
el3_state_t el3state_ctx;
el1_sys_regs_t sysregs_ctx;
#if CTX_INCLUDE_FPREGS
diff --git a/include/lib/smcc.h b/include/lib/smcc.h
index 13b1e7a..a273b3a 100644
--- a/include/lib/smcc.h
+++ b/include/lib/smcc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -67,6 +67,11 @@
#include <cassert.h>
#include <stdint.h>
+#define SMCCC_MAJOR_VERSION U(1)
+#define SMCCC_MINOR_VERSION U(1)
+
+#define MAKE_SMCCC_VERSION(_major, _minor) (((_major) << 16) | (_minor))
+
/* Various flags passed to SMC handlers */
#define SMC_FROM_SECURE (U(0) << 0)
#define SMC_FROM_NON_SECURE (U(1) << 0)
@@ -78,6 +83,10 @@
#define is_std_svc_call(_fid) ((((_fid) >> FUNCID_OEN_SHIFT) & \
FUNCID_OEN_MASK) == OEN_STD_START)
+/* The macro below is used to identify an Arm Architectural Service SMC call */
+#define is_arm_arch_svc_call(_fid) ((((_fid) >> FUNCID_OEN_SHIFT) & \
+ FUNCID_OEN_MASK) == OEN_ARM_START)
+
/* The macro below is used to identify a valid Fast SMC call */
#define is_valid_fast_smc(_fid) ((!(((_fid) >> 16) & U(0xff))) && \
(GET_SMC_TYPE(_fid) == SMC_TYPE_FAST))
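A quick standalone check of the version encoding (major in bits [31:16], minor in bits [15:0]), matching what the SMCCC_VERSION call returns:

    #include <assert.h>

    #define MAKE_SMCCC_VERSION(_major, _minor) (((_major) << 16) | (_minor))

    int main(void)
    {
    	/* SMCCC v1.1 is therefore reported as 0x10001. */
    	assert(MAKE_SMCCC_VERSION(1, 1) == 0x10001);
    	return 0;
    }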
diff --git a/include/lib/utils_def.h b/include/lib/utils_def.h
index bda3b07..ecb261a 100644
--- a/include/lib/utils_def.h
+++ b/include/lib/utils_def.h
@@ -24,6 +24,11 @@
*/
#define DIV_ROUND_UP_2EVAL(n, d) (((n) + (d) - 1) / (d))
+#define div_round_up(val, div) __extension__ ({ \
+ __typeof__(div) _div = (div); \
+ ((val) + _div - 1) / _div; \
+})
+
#define MIN(x, y) __extension__ ({ \
__typeof__(x) _x = (x); \
__typeof__(y) _y = (y); \
@@ -55,11 +60,6 @@
#define round_down(value, boundary) \
((value) & ~round_boundary(value, boundary))
-#define div_round_up(val, div) __extension__ ({ \
- __typeof__(div) _div = (div); \
- round_up((val), _div)/_div; \
-})
-
/*
* Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
* Both arguments must be unsigned pointer values (i.e. uintptr_t).
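The rewrite is not just a relocation: the old definition divided round_up(val, _div) by _div, and the mask-based round_up() in this header (reproduced below as I read it) is only exact for power-of-two boundaries. A standalone check:

    #include <assert.h>

    /* Mask-based helpers, as defined in this header (only exact when
     * 'boundary' is a power of two). */
    #define round_boundary(value, boundary) ((__typeof__(value))((boundary) - 1))
    #define round_up(value, boundary) \
    	((((value) - 1) | round_boundary(value, boundary)) + 1)

    int main(void)
    {
    	/* Old definition: round_up(7, 3) / 3 == 7 / 3 == 2, i.e. it could
    	 * round *down* for a non-power-of-two divisor. */
    	assert(round_up(7U, 3U) / 3U == 2U);

    	/* New definition: ceil(7 / 3) == 3, for any positive divisor. */
    	assert((7U + 3U - 1U) / 3U == 3U);
    	return 0;
    }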
diff --git a/include/services/arm_arch_svc.h b/include/services/arm_arch_svc.h
new file mode 100644
index 0000000..2961601
--- /dev/null
+++ b/include/services/arm_arch_svc.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARM_ARCH_SVC_H__
+#define __ARM_ARCH_SVC_H__
+
+#define SMCCC_VERSION U(0x80000000)
+#define SMCCC_ARCH_FEATURES U(0x80000001)
+#define SMCCC_ARCH_WORKAROUND_1 U(0x80008000)
+
+#endif /* __ARM_ARCH_SVC_H__ */
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
index cd29266..cd82497 100644
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
@@ -1,20 +1,27 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
+#include <arm_arch_svc.h>
#include <asm_macros.S>
#include <context.h>
.globl workaround_bpiall_vbar0_runtime_exceptions
#define EMIT_BPIALL 0xee070fd5
-#define EMIT_MOV_R0_IMM(v) 0xe3a0000##v
#define EMIT_SMC 0xe1600070
+#define ESR_EL3_A64_SMC0 0x5e000000
- .macro enter_workaround _stub_name
+ .macro enter_workaround _from_vector
+ /*
+ * Save register state to enable a call to AArch32 S-EL1 and return.
+ * Identify the original calling vector in w2 (== _from_vector).
+ * Use w3-w6 for additional register state preservation while in S-EL1.
+ */
+
/* Save GP regs */
stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
@@ -32,47 +39,50 @@
stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+ /* Identify the original exception vector */
+ mov w2, \_from_vector
+
- adr x4, \_stub_name
+ /* Preserve 32-bit system registers in GP registers through the workaround */
+ mrs x3, esr_el3
+ mrs x4, spsr_el3
+ mrs x5, scr_el3
+ mrs x6, sctlr_el1
/*
- * Load SPSR_EL3 and VBAR_EL3. SPSR_EL3 is set up to have
- * all interrupts masked in preparation to running the workaround
- * stub in S-EL1. VBAR_EL3 points to the vector table that
- * will handle the SMC back from the workaround stub.
+ * Preserve LR and ELR_EL3 registers in the GP regs context.
+ * Temporarily use the CTX_GPREG_SP_EL0 slot to preserve ELR_EL3
+ * through the workaround. This is OK because at this point the
+ * current state for this context's SP_EL0 is in the live system
+ * register, which is unmodified by the workaround.
*/
- ldp x0, x1, [x4, #0]
+ mrs x7, elr_el3
+ stp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
/*
- * Load SCTLR_EL1 and ELR_EL3. SCTLR_EL1 is configured to disable
- * the MMU in S-EL1. ELR_EL3 points to the appropriate stub in S-EL1.
+ * Load system registers for entry to S-EL1.
*/
- ldp x2, x3, [x4, #16]
- mrs x4, scr_el3
- mrs x5, spsr_el3
- mrs x6, elr_el3
- mrs x7, sctlr_el1
- mrs x8, esr_el3
+ /* Mask all interrupts and set AArch32 Supervisor mode */
+ movz w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)
+
+ /* Switch EL3 exception vectors while the workaround is executing. */
+ adr x9, workaround_bpiall_vbar1_runtime_exceptions
+
+ /* Setup SCTLR_EL1 with MMU off and I$ on */
+ ldr x10, stub_sel1_sctlr
- /* Preserve system registers in the workaround context */
- stp x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
- stp x6, x7, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
- stp x8, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+ /* Land at the S-EL1 workaround stub */
+ adr x11, aarch32_stub
/*
* Setting SCR_EL3 to all zeroes means that the NS, RW
* and SMD bits are configured as expected.
*/
msr scr_el3, xzr
-
- /*
- * Reload system registers with the crafted values
- * in preparation for entry in S-EL1.
- */
- msr spsr_el3, x0
- msr vbar_el3, x1
- msr sctlr_el1, x2
- msr elr_el3, x3
+ msr spsr_el3, x8
+ msr vbar_el3, x9
+ msr sctlr_el1, x10
+ msr elr_el3, x11
eret
.endm
@@ -91,76 +101,31 @@
*/
vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
b sync_exception_sp_el0
+ nop /* to force 8-byte alignment for the following stub */
+
/*
* Since each vector table entry is 128 bytes, we can store the
* stub context in the unused space to minimize memory footprint.
*/
-aarch32_stub_smc:
+stub_sel1_sctlr:
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+
+aarch32_stub:
.word EMIT_BPIALL
- .word EMIT_MOV_R0_IMM(1)
.word EMIT_SMC
-aarch32_stub_ctx_smc:
- /* Mask all interrupts and set AArch32 Supervisor mode */
- .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
- SPSR_M_AARCH32 << SPSR_M_SHIFT | \
- MODE32_svc << MODE32_SHIFT)
- /*
- * VBAR_EL3 points to vbar1 which is the vector table
- * used while the workaround is executing.
- */
- .quad workaround_bpiall_vbar1_runtime_exceptions
-
- /* Setup SCTLR_EL1 with MMU off and I$ on */
- .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
-
- /* ELR_EL3 is setup to point to the sync exception stub in AArch32 */
- .quad aarch32_stub_smc
check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
vector_entry workaround_bpiall_vbar0_irq_sp_el0
b irq_sp_el0
-aarch32_stub_irq:
- .word EMIT_BPIALL
- .word EMIT_MOV_R0_IMM(2)
- .word EMIT_SMC
-aarch32_stub_ctx_irq:
- .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
- SPSR_M_AARCH32 << SPSR_M_SHIFT | \
- MODE32_svc << MODE32_SHIFT)
- .quad workaround_bpiall_vbar1_runtime_exceptions
- .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
- .quad aarch32_stub_irq
check_vector_size workaround_bpiall_vbar0_irq_sp_el0
vector_entry workaround_bpiall_vbar0_fiq_sp_el0
b fiq_sp_el0
-aarch32_stub_fiq:
- .word EMIT_BPIALL
- .word EMIT_MOV_R0_IMM(4)
- .word EMIT_SMC
-aarch32_stub_ctx_fiq:
- .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
- SPSR_M_AARCH32 << SPSR_M_SHIFT | \
- MODE32_svc << MODE32_SHIFT)
- .quad workaround_bpiall_vbar1_runtime_exceptions
- .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
- .quad aarch32_stub_fiq
check_vector_size workaround_bpiall_vbar0_fiq_sp_el0
vector_entry workaround_bpiall_vbar0_serror_sp_el0
b serror_sp_el0
-aarch32_stub_serror:
- .word EMIT_BPIALL
- .word EMIT_MOV_R0_IMM(8)
- .word EMIT_SMC
-aarch32_stub_ctx_serror:
- .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
- SPSR_M_AARCH32 << SPSR_M_SHIFT | \
- MODE32_svc << MODE32_SHIFT)
- .quad workaround_bpiall_vbar1_runtime_exceptions
- .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
- .quad aarch32_stub_serror
check_vector_size workaround_bpiall_vbar0_serror_sp_el0
/* ---------------------------------------------------------------------
@@ -188,19 +153,19 @@
* ---------------------------------------------------------------------
*/
vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
- enter_workaround aarch32_stub_ctx_smc
+ enter_workaround 1
check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64
vector_entry workaround_bpiall_vbar0_irq_aarch64
- enter_workaround aarch32_stub_ctx_irq
+ enter_workaround 2
check_vector_size workaround_bpiall_vbar0_irq_aarch64
vector_entry workaround_bpiall_vbar0_fiq_aarch64
- enter_workaround aarch32_stub_ctx_fiq
+ enter_workaround 4
check_vector_size workaround_bpiall_vbar0_fiq_aarch64
vector_entry workaround_bpiall_vbar0_serror_aarch64
- enter_workaround aarch32_stub_ctx_serror
+ enter_workaround 8
check_vector_size workaround_bpiall_vbar0_serror_aarch64
/* ---------------------------------------------------------------------
@@ -208,19 +173,19 @@
* ---------------------------------------------------------------------
*/
vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
- enter_workaround aarch32_stub_ctx_smc
+ enter_workaround 1
check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32
vector_entry workaround_bpiall_vbar0_irq_aarch32
- enter_workaround aarch32_stub_ctx_irq
+ enter_workaround 2
check_vector_size workaround_bpiall_vbar0_irq_aarch32
vector_entry workaround_bpiall_vbar0_fiq_aarch32
- enter_workaround aarch32_stub_ctx_fiq
+ enter_workaround 4
check_vector_size workaround_bpiall_vbar0_fiq_aarch32
vector_entry workaround_bpiall_vbar0_serror_aarch32
- enter_workaround aarch32_stub_ctx_serror
+ enter_workaround 8
check_vector_size workaround_bpiall_vbar0_serror_aarch32
/* ---------------------------------------------------------------------
@@ -297,31 +262,33 @@
* ---------------------------------------------------------------------
*/
vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
- /* Restore register state from the workaround context */
- ldp x2, x3, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
- ldp x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
- ldp x6, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+ /*
+ * w2 indicates which S-EL1 stub was run and thus which original vector was used.
+ * w3-w6 contain saved system register state (esr_el3 in w3).
+ * Restore LR and ELR_EL3 register state from the GP regs context.
+ */
+ ldp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
/* Apply the restored system register state */
- msr scr_el3, x2
- msr spsr_el3, x3
- msr elr_el3, x4
- msr sctlr_el1, x5
- msr esr_el3, x6
+ msr esr_el3, x3
+ msr spsr_el3, x4
+ msr scr_el3, x5
+ msr sctlr_el1, x6
+ msr elr_el3, x7
/*
* Workaround is complete, so swap VBAR_EL3 to point
* to workaround entry table in preparation for subsequent
* Sync/IRQ/FIQ/SError exceptions.
*/
- adr x2, workaround_bpiall_vbar0_runtime_exceptions
- msr vbar_el3, x2
+ adr x0, workaround_bpiall_vbar0_runtime_exceptions
+ msr vbar_el3, x0
/*
- * Restore all GP regs except x0 and x1. The value in x0
+ * Restore all GP regs except x2 and x3 (esr). The value in x2
* indicates the type of the original exception.
*/
- ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
@@ -336,37 +303,55 @@
ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+ /* Fast path Sync exceptions. Static predictor will fall through. */
+ tbz w2, #0, workaround_not_sync
+
/*
- * Each of these handlers will first restore x0 and x1 from
- * the context and the branch to the common implementation for
- * each of the exception types.
+ * Check if this is an SMC #0 issued from AArch64 state
+ * with W0 = SMCCC_ARCH_WORKAROUND_1
+ *
+ * This sequence evaluates as:
+ * (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+ * allowing use of a single branch operation
*/
- tbnz x0, #1, workaround_bpiall_vbar1_irq
- tbnz x0, #2, workaround_bpiall_vbar1_fiq
- tbnz x0, #3, workaround_bpiall_vbar1_serror
-
- /* Fallthrough case for Sync exception */
- ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ orr w2, wzr, #SMCCC_ARCH_WORKAROUND_1
+ cmp w0, w2
+ mov_imm w2, ESR_EL3_A64_SMC0
+ ccmp w3, w2, #0, eq
+ /* Static predictor will predict a fall through */
+ bne 1f
+ eret
+1:
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
b sync_exception_aarch64
check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
vector_entry workaround_bpiall_vbar1_irq_aarch32
b report_unhandled_interrupt
-workaround_bpiall_vbar1_irq:
- ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+
+ /*
+ * Post-workaround fan-out for non-sync exceptions
+ */
+workaround_not_sync:
+ tbnz w2, #3, workaround_bpiall_vbar1_serror
+ tbnz w2, #2, workaround_bpiall_vbar1_fiq
+ /* IRQ */
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
b irq_aarch64
+
+workaround_bpiall_vbar1_fiq:
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b fiq_aarch64
+
+workaround_bpiall_vbar1_serror:
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b serror_aarch64
check_vector_size workaround_bpiall_vbar1_irq_aarch32
vector_entry workaround_bpiall_vbar1_fiq_aarch32
b report_unhandled_interrupt
-workaround_bpiall_vbar1_fiq:
- ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
- b fiq_aarch64
check_vector_size workaround_bpiall_vbar1_fiq_aarch32
vector_entry workaround_bpiall_vbar1_serror_aarch32
b report_unhandled_exception
-workaround_bpiall_vbar1_serror:
- ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
- b serror_aarch64
check_vector_size workaround_bpiall_vbar1_serror_aarch32
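The orr/cmp/ccmp sequence above (used again in workaround_cve_2017_5715_mmu.S below) folds a two-part test into a single branch. A standalone model of the predicate, with constants copied from this file:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SMCCC_ARCH_WORKAROUND_1	0x80008000U
    #define ESR_EL3_A64_SMC0	0x5e000000U /* EC = SMC from AArch64, ISS = 0 */

    /* Both conditions must hold for the fast eret path to be taken. */
    static bool take_fast_path(uint32_t w0, uint32_t esr_el3)
    {
    	return (w0 == SMCCC_ARCH_WORKAROUND_1) &&
    	       (esr_el3 == ESR_EL3_A64_SMC0);
    }

    int main(void)
    {
    	assert(take_fast_path(SMCCC_ARCH_WORKAROUND_1, ESR_EL3_A64_SMC0));
    	/* Any other function ID (e.g. PSCI_VERSION, 0x84000000) falls
    	 * through to the full sync_exception_aarch64 handler. */
    	assert(!take_fast_path(0x84000000U, ESR_EL3_A64_SMC0));
    	return 0;
    }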
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
index f478148..b24b620 100644
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
@@ -1,26 +1,60 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
+#include <arm_arch_svc.h>
#include <asm_macros.S>
#include <context.h>
.globl workaround_mmu_runtime_exceptions
+#define ESR_EL3_A64_SMC0 0x5e000000
+
vector_base workaround_mmu_runtime_exceptions
- .macro apply_workaround
+ .macro apply_workaround _is_sync_exception
stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
- mrs x0, sctlr_el3
+ mrs x1, sctlr_el3
/* Disable MMU */
- bic x1, x0, #SCTLR_M_BIT
+ bic x1, x1, #SCTLR_M_BIT
msr sctlr_el3, x1
isb
- /* Restore MMU config */
- msr sctlr_el3, x0
+ /* Enable MMU */
+ orr x1, x1, #SCTLR_M_BIT
+ msr sctlr_el3, x1
+ /*
+ * Defer the ISB to avoid synchronizing twice in case we hit the
+ * workaround SMC call, which synchronizes implicitly via its ERET
+ * instruction.
+ */
+
+ /*
+ * Ensure this is an SMC #0 issued from AArch64 state
+ * with W0 = SMCCC_ARCH_WORKAROUND_1
+ *
+ * This sequence evaluates as:
+ * (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+ * allowing use of a single branch operation
+ */
+ .if \_is_sync_exception
+ orr w1, wzr, #SMCCC_ARCH_WORKAROUND_1
+ cmp w0, w1
+ mrs x0, esr_el3
+ mov_imm w1, ESR_EL3_A64_SMC0
+ ccmp w0, w1, #0, eq
+ /* Static predictor will predict a fall through */
+ bne 1f
+ eret
+1:
+ .endif
+
+ /*
+ * Synchronize now to enable the MMU. This is required
+ * to ensure the load pair below reads the data stored earlier.
+ */
isb
ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
.endm
@@ -70,22 +104,22 @@
* ---------------------------------------------------------------------
*/
vector_entry workaround_mmu_sync_exception_aarch64
- apply_workaround
+ apply_workaround _is_sync_exception=1
b sync_exception_aarch64
check_vector_size workaround_mmu_sync_exception_aarch64
vector_entry workaround_mmu_irq_aarch64
- apply_workaround
+ apply_workaround _is_sync_exception=0
b irq_aarch64
check_vector_size workaround_mmu_irq_aarch64
vector_entry workaround_mmu_fiq_aarch64
- apply_workaround
+ apply_workaround _is_sync_exception=0
b fiq_aarch64
check_vector_size workaround_mmu_fiq_aarch64
vector_entry workaround_mmu_serror_aarch64
- apply_workaround
+ apply_workaround _is_sync_exception=0
b serror_aarch64
check_vector_size workaround_mmu_serror_aarch64
@@ -94,21 +128,21 @@
* ---------------------------------------------------------------------
*/
vector_entry workaround_mmu_sync_exception_aarch32
- apply_workaround
+ apply_workaround _is_sync_exception=1
b sync_exception_aarch32
check_vector_size workaround_mmu_sync_exception_aarch32
vector_entry workaround_mmu_irq_aarch32
- apply_workaround
+ apply_workaround _is_sync_exception=0
b irq_aarch32
check_vector_size workaround_mmu_irq_aarch32
vector_entry workaround_mmu_fiq_aarch32
- apply_workaround
+ apply_workaround _is_sync_exception=0
b fiq_aarch32
check_vector_size workaround_mmu_fiq_aarch32
vector_entry workaround_mmu_serror_aarch32
- apply_workaround
+ apply_workaround _is_sync_exception=0
b serror_aarch32
check_vector_size workaround_mmu_serror_aarch32
diff --git a/lib/psci/psci_main.c b/lib/psci/psci_main.c
index 8e41cf0..88cf5cb 100644
--- a/lib/psci/psci_main.c
+++ b/lib/psci/psci_main.c
@@ -1,11 +1,12 @@
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <arch_helpers.h>
+#include <arm_arch_svc.h>
#include <assert.h>
#include <debug.h>
#include <platform.h>
@@ -322,6 +323,9 @@
{
unsigned int local_caps = psci_caps;
+ if (psci_fid == SMCCC_VERSION)
+ return PSCI_E_SUCCESS;
+
/* Check if it is a 64 bit function */
if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
local_caps &= PSCI_CAP_64BIT_MASK;
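Caller side of this hunk: SMCCC v1.1 discovery starts with exactly this PSCI_FEATURES probe, since pre-v1.1 firmware may not handle SMCCC_VERSION itself. A runnable sketch with a mocked conduit (smc32() here is a hypothetical stand-in for a real SMC trap to EL3):

    #include <stdint.h>
    #include <stdio.h>

    #define PSCI_FEATURES		0x8400000aU
    #define SMCCC_VERSION_FID	0x80000000U

    /* Hypothetical conduit that mocks a v1.1 firmware so the flow runs. */
    static int32_t smc32(uint32_t fid, uint32_t arg0)
    {
    	if (fid == PSCI_FEATURES && arg0 == SMCCC_VERSION_FID)
    		return 0;		/* PSCI_E_SUCCESS */
    	if (fid == SMCCC_VERSION_FID)
    		return 0x10001;		/* SMCCC v1.1 */
    	return -1;			/* NOT_SUPPORTED */
    }

    int main(void)
    {
    	/* Probe SMCCC_VERSION via PSCI_FEATURES first: v1.0 firmware may
    	 * not handle the call itself, but must handle PSCI_FEATURES. */
    	uint32_t ver = (smc32(PSCI_FEATURES, SMCCC_VERSION_FID) < 0) ?
    			0x10000U : (uint32_t)smc32(SMCCC_VERSION_FID, 0);
    	printf("SMCCC v%u.%u\n", ver >> 16, ver & 0xffffU);
    	return 0;
    }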
diff --git a/plat/arm/board/fvp/fvp_common.c b/plat/arm/board/fvp/fvp_common.c
index 6729863..600af61 100644
--- a/plat/arm/board/fvp/fvp_common.c
+++ b/plat/arm/board/fvp/fvp_common.c
@@ -7,6 +7,7 @@
#include <arm_config.h>
#include <arm_def.h>
#include <arm_spm_def.h>
+#include <arm_xlat_tables.h>
#include <assert.h>
#include <cci.h>
#include <ccn.h>
@@ -128,6 +129,9 @@
#if ENABLE_SPM && defined(IMAGE_BL31)
const mmap_region_t plat_arm_secure_partition_mmap[] = {
V2M_MAP_IOFPGA_EL0, /* for the UART */
+ MAP_REGION_FLAT(DEVICE0_BASE, \
+ DEVICE0_SIZE, \
+ MT_DEVICE | MT_RO | MT_SECURE | MT_USER),
ARM_SP_IMAGE_MMAP,
ARM_SP_IMAGE_NS_BUF_MMAP,
ARM_SP_IMAGE_RW_MMAP,
diff --git a/plat/hisilicon/hikey/hikey_bl2_setup.c b/plat/hisilicon/hikey/hikey_bl2_setup.c
index 86c205d..3f5e486 100644
--- a/plat/hisilicon/hikey/hikey_bl2_setup.c
+++ b/plat/hisilicon/hikey/hikey_bl2_setup.c
@@ -489,4 +489,5 @@
void bl2_platform_setup(void)
{
+ hikey_security_setup();
}
diff --git a/plat/hisilicon/hikey/hikey_private.h b/plat/hisilicon/hikey/hikey_private.h
index a7709b2..da98734 100644
--- a/plat/hisilicon/hikey/hikey_private.h
+++ b/plat/hisilicon/hikey/hikey_private.h
@@ -44,6 +44,7 @@
int hikey_flash(const char *arg);
int hikey_oem(const char *arg);
int hikey_reboot(const char *arg);
+void hikey_security_setup(void);
const char *hikey_init_serialno(void);
int hikey_read_serialno(struct random_serial_num *serialno);
diff --git a/plat/hisilicon/hikey/hikey_security.c b/plat/hisilicon/hikey/hikey_security.c
new file mode 100644
index 0000000..863ad2b
--- /dev/null
+++ b/plat/hisilicon/hikey/hikey_security.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <stdint.h>
+#include <strings.h>
+#include <utils_def.h>
+#include "hikey_private.h"
+
+#define PORTNUM_MAX 5
+
+#define MDDRC_SECURITY_BASE 0xF7121000
+
+struct int_en_reg {
+ unsigned in_en:1;
+ unsigned reserved:31;
+};
+
+struct rgn_map_reg {
+ unsigned rgn_base_addr:24;
+ unsigned rgn_size:6;
+ unsigned reserved:1;
+ unsigned rgn_en:1;
+};
+
+struct rgn_attr_reg {
+ unsigned sp:4;
+ unsigned security_inv:1;
+ unsigned reserved_0:3;
+ unsigned mid_en:1;
+ unsigned mid_inv:1;
+ unsigned reserved_1:6;
+ unsigned rgn_en:1;
+ unsigned subrgn_disable:16;
+};
+
+static volatile struct int_en_reg *get_int_en_reg(uint32_t base)
+{
+ uint64_t addr = base + 0x20;
+ return (struct int_en_reg *)addr;
+}
+
+static volatile struct rgn_map_reg *get_rgn_map_reg(uint32_t base, int region, int port)
+{
+ uint64_t addr = base + 0x100 + 0x10 * region + 0x400 * (uint64_t)port;
+ return (struct rgn_map_reg *)addr;
+}
+
+static volatile struct rgn_attr_reg *get_rgn_attr_reg(uint32_t base, int region,
+ int port)
+{
+ uint64_t addr = base + 0x104 + 0x10 * region + 0x400 * (uint64_t)port;
+ return (struct rgn_attr_reg *)addr;
+}
+
+/*
+ * Configure a secure memory region.
+ * region_size must be a power of 2 and at least 64KB.
+ * region_base must be aligned to region_size.
+ */
+static void sec_protect(uint32_t region_base, uint32_t region_size,
+ int region)
+{
+ volatile struct int_en_reg *int_en;
+ volatile struct rgn_map_reg *rgn_map;
+ volatile struct rgn_attr_reg *rgn_attr;
+ uint32_t i = 0;
+
+ /* ensure secure region number is between 1-15 */
+ assert(region > 0 && region < 16);
+ /* ensure secure region size is a power of 2 >= 64KB */
+ assert(IS_POWER_OF_TWO(region_size) && region_size >= 0x10000);
+ /* ensure secure region address is aligned to region size */
+ assert(!(region_base & (region_size - 1)));
+
+ INFO("BL2: TrustZone: protecting %u bytes of memory at 0x%x\n", region_size,
+ region_base);
+
+ int_en = get_int_en_reg(MDDRC_SECURITY_BASE);
+ int_en->in_en = 0x1;
+
+ for (i = 0; i < PORTNUM_MAX; i++) {
+ rgn_map = get_rgn_map_reg(MDDRC_SECURITY_BASE, region, i);
+ rgn_attr = get_rgn_attr_reg(MDDRC_SECURITY_BASE, region, i);
+ rgn_map->rgn_base_addr = region_base >> 16;
+ rgn_attr->subrgn_disable = 0x0;
+ rgn_attr->sp = (i == 3) ? 0xC : 0x0;
+ rgn_map->rgn_size = __builtin_ffs(region_size) - 2;
+ rgn_map->rgn_en = 0x1;
+ }
+}
+
+/*******************************************************************************
+ * Initialize the secure environment.
+ ******************************************************************************/
+void hikey_security_setup(void)
+{
+ sec_protect(DDR_SEC_BASE, DDR_SEC_SIZE, 1);
+ sec_protect(DDR_SDP_BASE, DDR_SDP_SIZE, 2);
+}
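A note on the rgn_size encoding above: as I read the driver (an inference, not a documented register spec), a power-of-two region_size = 2^n is stored as n - 1. A standalone check of the __builtin_ffs() arithmetic:

    #include <assert.h>

    int main(void)
    {
    	/* __builtin_ffs() returns the 1-based index of the lowest set bit,
    	 * so for region_size = 2^n it returns n + 1 and the driver stores
    	 * n - 1 in rgn_size. */
    	assert(__builtin_ffs(0x10000) - 2 == 15);	/*  64 KiB -> 15 */
    	assert(__builtin_ffs(0x100000) - 2 == 19);	/*   1 MiB -> 19 */
    	return 0;
    }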
diff --git a/plat/hisilicon/hikey/platform.mk b/plat/hisilicon/hikey/platform.mk
index 524fa6a..b11d208 100644
--- a/plat/hisilicon/hikey/platform.mk
+++ b/plat/hisilicon/hikey/platform.mk
@@ -78,6 +78,7 @@
drivers/synopsys/emmc/dw_mmc.c \
plat/hisilicon/hikey/aarch64/hikey_helpers.S \
plat/hisilicon/hikey/hikey_bl2_setup.c \
+ plat/hisilicon/hikey/hikey_security.c \
plat/hisilicon/hikey/hikey_ddr.c \
plat/hisilicon/hikey/hikey_io_storage.c \
plat/hisilicon/hikey/hisi_dvfs.c \
@@ -121,4 +122,6 @@
ERRATA_A53_843419 := 1
ERRATA_A53_855873 := 1
+WORKAROUND_CVE_2017_5715 := 0
+
FIP_ALIGN := 512
diff --git a/plat/hisilicon/poplar/platform.mk b/plat/hisilicon/poplar/platform.mk
index 2dbbac6..d53e062 100644
--- a/plat/hisilicon/poplar/platform.mk
+++ b/plat/hisilicon/poplar/platform.mk
@@ -29,6 +29,8 @@
ERRATA_A53_843419 := 1
ENABLE_SVE_FOR_NS := 0
+WORKAROUND_CVE_2017_5715 := 0
+
ARM_GIC_ARCH := 2
$(eval $(call add_define,ARM_GIC_ARCH))
diff --git a/plat/mediatek/mt6795/platform.mk b/plat/mediatek/mt6795/platform.mk
index 8230067..1bdf30a 100644
--- a/plat/mediatek/mt6795/platform.mk
+++ b/plat/mediatek/mt6795/platform.mk
@@ -61,6 +61,8 @@
ERRATA_A53_826319 := 1
ERRATA_A53_836870 := 1
+WORKAROUND_CVE_2017_5715 := 0
+
# indicate the reset vector address can be programmed
PROGRAMMABLE_RESET_ADDRESS := 1
diff --git a/plat/rockchip/common/plat_pm.c b/plat/rockchip/common/plat_pm.c
index cd88f60..352dbc8 100644
--- a/plat/rockchip/common/plat_pm.c
+++ b/plat/rockchip/common/plat_pm.c
@@ -246,14 +246,14 @@
if (RK_CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
return;
+ /* Prevent interrupts from spuriously waking up this cpu */
+ plat_rockchip_gic_cpuif_disable();
+
if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
rockchip_soc_sys_pwr_dm_suspend();
else
rockchip_soc_cores_pwr_dm_suspend();
- /* Prevent interrupts from spuriously waking up this cpu */
- plat_rockchip_gic_cpuif_disable();
-
/* Perform the common cluster specific operations */
if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
plat_cci_disable();
diff --git a/plat/rockchip/rk3328/platform.mk b/plat/rockchip/rk3328/platform.mk
index 6e4d5b4..f0fd36f 100644
--- a/plat/rockchip/rk3328/platform.mk
+++ b/plat/rockchip/rk3328/platform.mk
@@ -58,3 +58,5 @@
# Do not enable SVE
ENABLE_SVE_FOR_NS := 0
+
+WORKAROUND_CVE_2017_5715 := 0
diff --git a/plat/rockchip/rk3368/platform.mk b/plat/rockchip/rk3368/platform.mk
index ad204e9..7ecb21a 100644
--- a/plat/rockchip/rk3368/platform.mk
+++ b/plat/rockchip/rk3368/platform.mk
@@ -57,3 +57,5 @@
# Do not enable SVE
ENABLE_SVE_FOR_NS := 0
+
+WORKAROUND_CVE_2017_5715 := 0
diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu.c b/plat/rockchip/rk3399/drivers/pmu/pmu.c
index c666c3c..51101a4 100644
--- a/plat/rockchip/rk3399/drivers/pmu/pmu.c
+++ b/plat/rockchip/rk3399/drivers/pmu/pmu.c
@@ -12,6 +12,7 @@
#include <delay_timer.h>
#include <dfs.h>
#include <errno.h>
+#include <gicv3.h>
#include <gpio.h>
#include <m0_ctl.h>
#include <mmio.h>
@@ -45,6 +46,8 @@
static uint32_t store_grf_ddrc_con[4];
static uint32_t store_wdt0[2];
static uint32_t store_wdt1[2];
+static gicv3_dist_ctx_t dist_ctx;
+static gicv3_redist_ctx_t rdist_ctx;
/*
* There are two ways to power a core on or off.
@@ -79,9 +82,12 @@
do {
bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id;
bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id;
+ if (bus_state == bus_req && bus_ack == bus_req)
+ break;
+
wait_cnt++;
- } while ((bus_state != bus_req || bus_ack != bus_req) &&
- (wait_cnt < MAX_WAIT_COUNT));
+ udelay(1);
+ } while (wait_cnt < MAX_WAIT_COUNT);
if (bus_state != bus_req || bus_ack != bus_req) {
INFO("%s:st=%x(%x)\n", __func__,
@@ -95,7 +101,7 @@
struct pmu_slpdata_s pmu_slpdata;
-static void qos_save(void)
+static void qos_restore(void)
{
if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
RESTORE_QOS(pmu_slpdata.gpu_qos, GPU);
@@ -161,7 +167,7 @@
}
}
-static void qos_restore(void)
+static void qos_save(void)
{
if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
SAVE_QOS(pmu_slpdata.gpu_qos, GPU);
@@ -430,6 +436,7 @@
while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
wait_cnt++;
+ udelay(1);
if (wait_cnt >= MAX_WAIT_COUNT)
ERROR("%s:wait cluster-b l2(%x)\n", __func__,
mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
@@ -1327,6 +1334,9 @@
dmc_suspend();
pmu_scu_b_pwrdn();
+ gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);
+ gicv3_distif_save(&dist_ctx);
+
/* need to save usbphy before shutdown PERIHP PD */
save_usbphy();
@@ -1369,6 +1379,7 @@
mmio_read_32(PMU_BASE + PMU_ADB400_ST));
panic();
}
+ udelay(1);
}
mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));
@@ -1462,6 +1473,7 @@
mmio_read_32(PMU_BASE + PMU_ADB400_ST));
panic();
}
+ udelay(1);
}
pmu_sgrf_rst_hld_release();
@@ -1481,6 +1493,8 @@
BIT(PMU_CLR_PERILPM0) |
BIT(PMU_CLR_GIC));
+ gicv3_distif_init_restore(&dist_ctx);
+ gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
plat_rockchip_gic_cpuif_enable();
m0_stop();
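The loop rework above (and the udelay(1) calls added to the other wait loops) share one pattern: test for success before bumping the counter, and sleep each iteration so MAX_WAIT_COUNT bounds wall-clock time instead of a raw spin count. A standalone sketch of that shape; condition_met() and the stub udelay() are stand-ins, not driver code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_WAIT_COUNT	1000	/* ~1 ms at 1 us per iteration */

    static int polls;
    static bool condition_met(void) { return ++polls > 3; } /* fake status */
    static void udelay(uint32_t us) { (void)us; } /* real driver busy-waits */

    int main(void)
    {
    	uint32_t wait_cnt = 0;

    	do {
    		if (condition_met())
    			break;
    		wait_cnt++;
    		udelay(1);
    	} while (wait_cnt < MAX_WAIT_COUNT);

    	printf("condition met after %u polls\n", wait_cnt);
    	return 0;
    }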
diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu.h b/plat/rockchip/rk3399/drivers/pmu/pmu.h
index 5c0ab4d..0265dde 100644
--- a/plat/rockchip/rk3399/drivers/pmu/pmu.h
+++ b/plat/rockchip/rk3399/drivers/pmu/pmu.h
@@ -53,7 +53,7 @@
#define TSADC_INT_PIN 38
#define CORES_PM_DISABLE 0x0
-#define PD_CTR_LOOP 500
+#define PD_CTR_LOOP 10000
#define CHK_CPU_LOOP 500
#define MAX_WAIT_COUNT 1000
diff --git a/plat/rockchip/rk3399/platform.mk b/plat/rockchip/rk3399/platform.mk
index 9e369e4..1997dfc 100644
--- a/plat/rockchip/rk3399/platform.mk
+++ b/plat/rockchip/rk3399/platform.mk
@@ -23,6 +23,8 @@
-I${RK_PLAT_SOC}/include/shared/ \
RK_GIC_SOURCES := drivers/arm/gic/common/gic_common.c \
+ drivers/arm/gic/v3/arm_gicv3_common.c \
+ drivers/arm/gic/v3/gic500.c \
drivers/arm/gic/v3/gicv3_main.c \
drivers/arm/gic/v3/gicv3_helpers.c \
plat/common/plat_gicv3.c \
diff --git a/plat/rpi3/platform.mk b/plat/rpi3/platform.mk
index 821f801..e201cee 100644
--- a/plat/rpi3/platform.mk
+++ b/plat/rpi3/platform.mk
@@ -64,6 +64,8 @@
ERRATA_A53_843419 := 1
ERRATA_A53_855873 := 1
+WORKAROUND_CVE_2017_5715 := 0
+
# Disable the PSCI platform compatibility layer by default
ENABLE_PLAT_COMPAT := 0
diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk
index bdd194b..bddf305 100644
--- a/plat/xilinx/zynqmp/platform.mk
+++ b/plat/xilinx/zynqmp/platform.mk
@@ -14,6 +14,8 @@
# Do not enable SVE
ENABLE_SVE_FOR_NS := 0
+WORKAROUND_CVE_2017_5715 := 0
+
ifdef ZYNQMP_ATF_MEM_BASE
$(eval $(call add_define,ZYNQMP_ATF_MEM_BASE))
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
new file mode 100644
index 0000000..eedac86
--- /dev/null
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <runtime_svc.h>
+#include <smcc.h>
+#include <smcc_helpers.h>
+
+static int32_t smccc_version(void)
+{
+ return MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION);
+}
+
+static int32_t smccc_arch_features(u_register_t arg)
+{
+ switch (arg) {
+ case SMCCC_VERSION:
+ case SMCCC_ARCH_FEATURES:
+ return SMC_OK;
+#if WORKAROUND_CVE_2017_5715
+ case SMCCC_ARCH_WORKAROUND_1:
+ return SMC_OK;
+#endif
+ default:
+ return SMC_UNK;
+ }
+}
+
+/*
+ * Top-level Arm Architectural Service SMC handler.
+ */
+uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ switch (smc_fid) {
+ case SMCCC_VERSION:
+ SMC_RET1(handle, smccc_version());
+ case SMCCC_ARCH_FEATURES:
+ SMC_RET1(handle, smccc_arch_features(x1));
+#if WORKAROUND_CVE_2017_5715
+ case SMCCC_ARCH_WORKAROUND_1:
+ /*
+ * The workaround has already been applied on affected PEs
+ * during entry to EL3. On unaffected PEs, this function
+ * has no effect.
+ */
+ SMC_RET0(handle);
+#endif
+ default:
+ WARN("Unimplemented Arm Architecture Service Call: 0x%x\n",
+ smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
+
+/* Register the Arm Architectural Service as a runtime service */
+DECLARE_RT_SVC(
+ arm_arch_svc,
+ OEN_ARM_START,
+ OEN_ARM_END,
+ SMC_TYPE_FAST,
+ NULL,
+ arm_arch_svc_smc_handler
+);
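For context, the caller-side sequence this service supports (per SMCCC v1.1): check the SMCCC version, probe the function with SMCCC_ARCH_FEATURES, then invoke the workaround on context switches. A sketch assuming a hypothetical smc32() conduit; it compiles, but needs a real SMC trap to run:

    #include <stdbool.h>
    #include <stdint.h>

    #define SMCCC_VERSION		0x80000000U
    #define SMCCC_ARCH_FEATURES	0x80000001U
    #define SMCCC_ARCH_WORKAROUND_1	0x80008000U

    /* Hypothetical conduit: issues an SMC and returns w0. */
    extern int32_t smc32(uint32_t fid, uint32_t arg0);

    bool have_workaround_1(void)
    {
    	return smc32(SMCCC_VERSION, 0) >= 0x10001 &&
    	       smc32(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1) == 0;
    }

    /* Invoked by the OS on context switches once discovery succeeds; the
     * mitigation runs as a side effect of trapping into EL3. */
    void harden_branch_predictor(void)
    {
    	smc32(SMCCC_ARCH_WORKAROUND_1, 0);
    }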
diff --git a/tools/fiptool/fiptool.c b/tools/fiptool/fiptool.c
index 33c451e..e70ff36 100644
--- a/tools/fiptool/fiptool.c
+++ b/tools/fiptool/fiptool.c
@@ -543,7 +543,6 @@
log_dbgx("Metadata size: %zu bytes", buf_size);
xfwrite(buf, buf_size, fp, filename);
- free(buf);
if (verbose)
log_dbgx("Payload size: %zu bytes", payload_size);
@@ -566,6 +565,7 @@
while (pad_size--)
fputc(0x0, fp);
+ free(buf);
fclose(fp);
return 0;
}