Merge pull request #1224 from masahir0y/gzip

Implement SMCCC v1.1, rework the CVE-2017-5715 (Spectre variant 2) workarounds around SMCCC_ARCH_WORKAROUND_1, and add AArch32 AMU group 1 support
diff --git a/.checkpatch.conf b/.checkpatch.conf
index 0c84fcd..e92b96f 100644
--- a/.checkpatch.conf
+++ b/.checkpatch.conf
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -48,6 +48,10 @@
 #      drivers/arm/gic/arm_gic.c:160:
 --showfile
 
+# Don't show some messages like the list of ignored types or the suggestion to
+# use "--fix" or report changes to the maintainers.
+--quiet
+
 #
 # Ignore the following message types, as they don't necessarily make sense in
 # the context of the Trusted Firmware.
@@ -78,6 +82,9 @@
 # We allow adding new typedefs in TF.
 --ignore NEW_TYPEDEFS
 
+# Avoid "Does not appear to be a unified-diff format patch" message
+--ignore NOT_UNIFIED_DIFF
+
 # VOLATILE reports this kind of messages:
 # "Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt"
 # We allow the usage of the volatile keyword in TF.
diff --git a/Makefile b/Makefile
index fab310b..73566c0 100644
--- a/Makefile
+++ b/Makefile
@@ -682,7 +682,14 @@
 
 checkpatch:		locate-checkpatch
 	@echo "  CHECKING STYLE"
-	${Q}git format-patch --stdout ${BASE_COMMIT}..HEAD -- ${CHECK_PATHS} | ${CHECKPATCH} - || true
+	${Q}COMMON_COMMIT=$$(git merge-base HEAD ${BASE_COMMIT});	\
+	for commit in `git rev-list $$COMMON_COMMIT..HEAD`; do		\
+		printf "\n[*] Checking style of '$$commit'\n\n";	\
+		git log --format=email "$$commit~..$$commit"		\
+			-- ${CHECK_PATHS} | ${CHECKPATCH} - || true;	\
+		git diff --format=email "$$commit~..$$commit"		\
+			-- ${CHECK_PATHS} | ${CHECKPATCH} - || true;	\
+	done
 
 certtool: ${CRTTOOL}
 
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 9b7735f..57c065c 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -153,7 +153,14 @@
 	.endm
 
 
-	.macro save_x18_to_x29_sp_el0
+	.macro save_x4_to_x29_sp_el0
+	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
 	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
 	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
 	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
@@ -297,34 +304,16 @@
 	/* Check whether aarch32 issued an SMC64 */
 	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited
 
-	/*
-	 * Since we're are coming from aarch32, x8-x18 need to be saved as per
-	 * SMC32 calling convention. If a lower EL in aarch64 is making an
-	 * SMC32 call then it must have saved x8-x17 already therein.
-	 */
-	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
-	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
-	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
-	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
-	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-
-	/* x4-x7, x18, sp_el0 are saved below */
-
 smc_handler64:
 	/*
 	 * Populate the parameters for the SMC handler.
 	 * We already have x0-x4 in place. x5 will point to a cookie (not used
 	 * now). x6 will point to the context structure (SP_EL3) and x7 will
-	 * contain flags we need to pass to the handler Hence save x5-x7.
+	 * contain flags we need to pass to the handler.
 	 *
-	 * Note: x4 only needs to be preserved for AArch32 callers but we do it
-	 *       for AArch64 callers as well for convenience
+	 * Save x4-x29 and sp_el0.  Refer to SMCCC v1.1.
 	 */
-	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
-	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-
-	/* Save rest of the gpregs and sp_el0*/
-	save_x18_to_x29_sp_el0
+	save_x4_to_x29_sp_el0
 
 	mov	x5, xzr
 	mov	x6, sp
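
Note: the registers populated just above line up one-to-one with the runtime service handler prototype used later in this patch (`arm_arch_svc_smc_handler`). A minimal sketch of that mapping, assuming `u_register_t` is `unsigned long` as usual in TF; the typedef name here is illustrative, not the TF definition:

```c
/*
 * Sketch of how the register set-up above reaches a runtime service
 * handler: x0 is the SMC function id, x1-x4 the arguments, x5 a cookie
 * (currently unused, hence xzr), x6 the context pointer (SP_EL3) and
 * x7 the flags.  Mirrors arm_arch_svc_smc_handler below.
 */
#include <stdint.h>

typedef unsigned long u_register_t;	/* assumption: matches TF on AArch64 */

typedef uintptr_t (*smc_handler_t)(uint32_t smc_fid,
		u_register_t x1, u_register_t x2,
		u_register_t x3, u_register_t x4,
		void *cookie,		/* x5: xzr for now */
		void *handle,		/* x6: sp (SP_EL3) */
		u_register_t flags);	/* x7 */
```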
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 2db4856..886d301 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -23,6 +23,7 @@
 				bl31/bl31_context_mgmt.c			\
 				common/runtime_svc.c				\
 				plat/common/aarch64/platform_mp_stack.S		\
+				services/arm_arch_svc/arm_arch_svc_setup.c	\
 				services/std_svc/std_svc_setup.c		\
 				${PSCI_LIB_SOURCES}				\
 				${SPM_SOURCES}					\
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
index 67a1981..145820e 100644
--- a/bl32/sp_min/sp_min.mk
+++ b/bl32/sp_min/sp_min.mk
@@ -23,7 +23,8 @@
 endif
 
 ifeq (${ENABLE_AMU}, 1)
-BL32_SOURCES		+=	lib/extensions/amu/aarch32/amu.c
+BL32_SOURCES		+=	lib/extensions/amu/aarch32/amu.c		\
+				lib/extensions/amu/aarch32/amu_helpers.S
 endif
 
 ifeq (${WORKAROUND_CVE_2017_5715},1)
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
index 134d534..3624cc6 100644
--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -544,7 +544,7 @@
 #define AMCNTENCLR0	p15, 0, c13, c2, 4
 #define AMCNTENSET0	p15, 0, c13, c2, 5
 #define AMCNTENCLR1	p15, 0, c13, c3, 0
-#define AMCNTENSET1	p15, 0, c13, c1, 1
+#define AMCNTENSET1	p15, 0, c13, c3, 1
 
 /* Activity Monitor Group 0 Event Counter Registers */
 #define AMEVCNTR00	p15, 0, c0
@@ -558,4 +558,40 @@
 #define AMEVTYPER02	p15, 0, c13, c6, 2
 #define AMEVTYPER03	p15, 0, c13, c6, 3
 
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10	p15, 0, c4
+#define AMEVCNTR11	p15, 1, c4
+#define AMEVCNTR12	p15, 2, c4
+#define AMEVCNTR13	p15, 3, c4
+#define AMEVCNTR14	p15, 4, c4
+#define AMEVCNTR15	p15, 5, c4
+#define AMEVCNTR16	p15, 6, c4
+#define AMEVCNTR17	p15, 7, c4
+#define AMEVCNTR18	p15, 0, c5
+#define AMEVCNTR19	p15, 1, c5
+#define AMEVCNTR1A	p15, 2, c5
+#define AMEVCNTR1B	p15, 3, c5
+#define AMEVCNTR1C	p15, 4, c5
+#define AMEVCNTR1D	p15, 5, c5
+#define AMEVCNTR1E	p15, 6, c5
+#define AMEVCNTR1F	p15, 7, c5
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10	p15, 0, c13, c14, 0
+#define AMEVTYPER11	p15, 0, c13, c14, 1
+#define AMEVTYPER12	p15, 0, c13, c14, 2
+#define AMEVTYPER13	p15, 0, c13, c14, 3
+#define AMEVTYPER14	p15, 0, c13, c14, 4
+#define AMEVTYPER15	p15, 0, c13, c14, 5
+#define AMEVTYPER16	p15, 0, c13, c14, 6
+#define AMEVTYPER17	p15, 0, c13, c14, 7
+#define AMEVTYPER18	p15, 0, c13, c15, 0
+#define AMEVTYPER19	p15, 0, c13, c15, 1
+#define AMEVTYPER1A	p15, 0, c13, c15, 2
+#define AMEVTYPER1B	p15, 0, c13, c15, 3
+#define AMEVTYPER1C	p15, 0, c13, c15, 4
+#define AMEVTYPER1D	p15, 0, c13, c15, 5
+#define AMEVTYPER1E	p15, 0, c13, c15, 6
+#define AMEVTYPER1F	p15, 0, c13, c15, 7
+
 #endif /* __ARCH_H__ */
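
The group 1 encodings added above follow a regular pattern for the 64-bit (MRRC/MCRR) accesses: opc1 counts 0-7 and CRm steps from c4 to c5 at index 8. A small illustrative C rendering of that pattern (not TF code, just a restatement of the table):

```c
/*
 * Illustrative mapping from a group 1 counter index to its CP15
 * 64-bit access encoding: opc1 = idx % 8, CRm = c4 for indices 0-7
 * and c5 for indices 8-15, mirroring the AMEVCNTR1<n> table above.
 */
#include <stdio.h>

static void amevcntr1_encoding(int idx, int *opc1, int *crm)
{
	*opc1 = idx % 8;
	*crm  = (idx < 8) ? 4 : 5;
}

int main(void)
{
	int opc1, crm;

	amevcntr1_encoding(10, &opc1, &crm);	/* AMEVCNTR1A */
	printf("p15, %d, c%d\n", opc1, crm);	/* prints "p15, 2, c5" */
	return 0;
}
```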
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index 5e212ec..5f6bdc9 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -46,26 +46,12 @@
 #define CTX_GPREG_SP_EL0	U(0xf8)
 #define CTX_GPREGS_END		U(0x100)
 
-#if WORKAROUND_CVE_2017_5715
-#define CTX_CVE_2017_5715_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
-#define CTX_CVE_2017_5715_QUAD0		U(0x0)
-#define CTX_CVE_2017_5715_QUAD1		U(0x8)
-#define	CTX_CVE_2017_5715_QUAD2		U(0x10)
-#define CTX_CVE_2017_5715_QUAD3		U(0x18)
-#define CTX_CVE_2017_5715_QUAD4		U(0x20)
-#define CTX_CVE_2017_5715_QUAD5		U(0x28)
-#define CTX_CVE_2017_5715_END		U(0x30)
-#else
-#define CTX_CVE_2017_5715_OFFSET	CTX_GPREGS_OFFSET
-#define CTX_CVE_2017_5715_END		CTX_GPREGS_END
-#endif
-
 /*******************************************************************************
  * Constants that allow assembler code to access members of and the 'el3_state'
  * structure at their correct offsets. Note that some of the registers are only
  * 32-bits wide but are stored as 64-bit values for convenience
  ******************************************************************************/
-#define CTX_EL3STATE_OFFSET	(CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_END)
+#define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
 #define CTX_SCR_EL3		U(0x0)
 #define CTX_RUNTIME_SP		U(0x8)
 #define CTX_SPSR_EL3		U(0x10)
@@ -200,9 +186,6 @@
 
 /* Constants to determine the size of individual context structures */
 #define CTX_GPREG_ALL		(CTX_GPREGS_END >> DWORD_SHIFT)
-#if WORKAROUND_CVE_2017_5715
-#define CTX_CVE_2017_5715_ALL	(CTX_CVE_2017_5715_END >> DWORD_SHIFT)
-#endif
 #define CTX_SYSREG_ALL		(CTX_SYSREGS_END >> DWORD_SHIFT)
 #if CTX_INCLUDE_FPREGS
 #define CTX_FPREG_ALL		(CTX_FPREGS_END >> DWORD_SHIFT)
@@ -218,10 +201,6 @@
  */
 DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
 
-#if WORKAROUND_CVE_2017_5715
-DEFINE_REG_STRUCT(cve_2017_5715_regs, CTX_CVE_2017_5715_ALL);
-#endif
-
 /*
  * AArch64 EL1 system register context structure for preserving the
  * architectural state during switches from one security state to
@@ -263,9 +242,6 @@
  */
 typedef struct cpu_context {
 	gp_regs_t gpregs_ctx;
-#if WORKAROUND_CVE_2017_5715
-	cve_2017_5715_regs_t cve_2017_5715_regs_ctx;
-#endif
 	el3_state_t el3state_ctx;
 	el1_sys_regs_t sysregs_ctx;
 #if CTX_INCLUDE_FPREGS
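
Removing the per-context scratch area shrinks `cpu_context` and restores the invariant that `el3_state` immediately follows the GP-register block. A compile-and-run sanity sketch, assuming `CTX_GPREGS_OFFSET` is 0 and `DWORD_SHIFT` is 3 (their usual TF values):

```c
/*
 * Sanity sketch of the simplified layout above; the offsets are the
 * ones visible in this hunk.
 */
#include <assert.h>
#include <stdint.h>

#define CTX_GPREGS_OFFSET	0x0	/* assumption: start of cpu_context */
#define CTX_GPREGS_END		0x100
#define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
#define DWORD_SHIFT		3	/* assumption: as elsewhere in TF */

struct gp_regs {
	uint64_t ctx_regs[CTX_GPREGS_END >> DWORD_SHIFT];
};

int main(void)
{
	/* 32 doublewords: x0-x29, lr and sp_el0 */
	assert(sizeof(struct gp_regs) == 0x100);
	assert(CTX_EL3STATE_OFFSET == 0x100);
	return 0;
}
```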
diff --git a/include/lib/extensions/amu.h b/include/lib/extensions/amu.h
index faa0ee1..559c8f1 100644
--- a/include/lib/extensions/amu.h
+++ b/include/lib/extensions/amu.h
@@ -7,10 +7,10 @@
 #ifndef __AMU_H__
 #define __AMU_H__
 
-#include <sys/cdefs.h> /* for CASSERT() */
 #include <cassert.h>
 #include <platform_def.h>
 #include <stdint.h>
+#include <sys/cdefs.h> /* for CASSERT() */
 
 /* All group 0 counters */
 #define AMU_GROUP0_COUNTERS_MASK	0xf
diff --git a/include/lib/smcc.h b/include/lib/smcc.h
index 13b1e7a..a273b3a 100644
--- a/include/lib/smcc.h
+++ b/include/lib/smcc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -67,6 +67,11 @@
 #include <cassert.h>
 #include <stdint.h>
 
+#define SMCCC_MAJOR_VERSION U(1)
+#define SMCCC_MINOR_VERSION U(1)
+
+#define MAKE_SMCCC_VERSION(_major, _minor) (((_major) << 16) | (_minor))
+
 /* Various flags passed to SMC handlers */
 #define SMC_FROM_SECURE		(U(0) << 0)
 #define SMC_FROM_NON_SECURE	(U(1) << 0)
@@ -78,6 +83,10 @@
 #define is_std_svc_call(_fid)		((((_fid) >> FUNCID_OEN_SHIFT) & \
 					   FUNCID_OEN_MASK) == OEN_STD_START)
 
+/* The macro below is used to identify an Arm Architectural Service SMC call */
+#define is_arm_arch_svc_call(_fid)	((((_fid) >> FUNCID_OEN_SHIFT) & \
+					   FUNCID_OEN_MASK) == OEN_ARM_START)
+
 /* The macro below is used to identify a valid Fast SMC call */
 #define is_valid_fast_smc(_fid)		((!(((_fid) >> 16) & U(0xff))) && \
 					   (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST))
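
A quick illustration of what the new macros encode — a minimal sketch, assuming the usual `smcc.h` values of `FUNCID_OEN_SHIFT` (24), `FUNCID_OEN_MASK` (0x3f) and `OEN_ARM_START` (0), with the `U()` casts dropped for plain C:

```c
/* Minimal sketch of the SMCCC version packing and the OEN routing check. */
#include <assert.h>
#include <stdint.h>

#define MAKE_SMCCC_VERSION(_major, _minor)	(((_major) << 16) | (_minor))

#define FUNCID_OEN_SHIFT	24	/* assumption: matches smcc.h */
#define FUNCID_OEN_MASK		0x3f	/* assumption: matches smcc.h */
#define OEN_ARM_START		0	/* assumption: matches smcc.h */

static int is_arm_arch_svc_call(uint32_t fid)
{
	return ((fid >> FUNCID_OEN_SHIFT) & FUNCID_OEN_MASK) == OEN_ARM_START;
}

int main(void)
{
	assert(MAKE_SMCCC_VERSION(1, 1) == 0x10001);	/* SMCCC v1.1 */
	assert(is_arm_arch_svc_call(0x80000000u));	/* SMCCC_VERSION fid */
	return 0;
}
```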
diff --git a/include/services/arm_arch_svc.h b/include/services/arm_arch_svc.h
new file mode 100644
index 0000000..2961601
--- /dev/null
+++ b/include/services/arm_arch_svc.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARM_ARCH_SVC_H__
+#define __ARM_ARCH_SVC_H__
+
+#define SMCCC_VERSION			U(0x80000000)
+#define SMCCC_ARCH_FEATURES		U(0x80000001)
+#define SMCCC_ARCH_WORKAROUND_1		U(0x80008000)
+
+#endif /* __ARM_ARCH_SVC_H__ */
diff --git a/lib/cpus/aarch64/cortex_a75_pubsub.c b/lib/cpus/aarch64/cortex_a75_pubsub.c
index c1089a6..a1ffcb0 100644
--- a/lib/cpus/aarch64/cortex_a75_pubsub.c
+++ b/lib/cpus/aarch64/cortex_a75_pubsub.c
@@ -5,8 +5,8 @@
  */
 
 #include <cortex_a75.h>
-#include <pubsub_events.h>
 #include <platform.h>
+#include <pubsub_events.h>
 
 struct amu_ctx {
 	uint64_t cnts[CORTEX_A75_AMU_NR_COUNTERS];
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
index cd29266..cd82497 100644
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
@@ -1,20 +1,27 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
+#include <arm_arch_svc.h>
 #include <asm_macros.S>
 #include <context.h>
 
 	.globl	workaround_bpiall_vbar0_runtime_exceptions
 
 #define EMIT_BPIALL		0xee070fd5
-#define EMIT_MOV_R0_IMM(v)	0xe3a0000##v
 #define EMIT_SMC		0xe1600070
+#define ESR_EL3_A64_SMC0	0x5e000000
 
-	.macro	enter_workaround _stub_name
+	.macro	enter_workaround _from_vector
+	/*
+	 * Save register state to enable a call to AArch32 S-EL1 and return
+	 * Identify the original calling vector in w2 (==_from_vector)
+	 * Use w3-w6 for additional register state preservation while in S-EL1
+	 */
+
 	/* Save GP regs */
 	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
 	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
@@ -32,47 +39,50 @@
 	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
 	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
 
+	/* Identify the original exception vector */
+	mov	w2, \_from_vector
+
-	adr	x4, \_stub_name
+	/* Preserve 32-bit system registers in GP registers through the workaround */
+	mrs	x3, esr_el3
+	mrs	x4, spsr_el3
+	mrs	x5, scr_el3
+	mrs	x6, sctlr_el1
 
 	/*
-	 * Load SPSR_EL3 and VBAR_EL3.  SPSR_EL3 is set up to have
-	 * all interrupts masked in preparation to running the workaround
-	 * stub in S-EL1.  VBAR_EL3 points to the vector table that
-	 * will handle the SMC back from the workaround stub.
+	 * Preserve LR and ELR_EL3 registers in the GP regs context.
+	 * Temporarily use the CTX_GPREG_SP_EL0 slot to preserve ELR_EL3
+	 * through the workaround. This is OK because at this point the
+	 * current state for this context's SP_EL0 is in the live system
+	 * register, which is unmodified by the workaround.
 	 */
-	ldp	x0, x1, [x4, #0]
+	mrs	x7, elr_el3
+	stp	x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 
 	/*
-	 * Load SCTLR_EL1 and ELR_EL3.  SCTLR_EL1 is configured to disable
-	 * the MMU in S-EL1.  ELR_EL3 points to the appropriate stub in S-EL1.
+	 * Load system registers for entry to S-EL1.
 	 */
-	ldp	x2, x3, [x4, #16]
 
-	mrs	x4, scr_el3
-	mrs	x5, spsr_el3
-	mrs	x6, elr_el3
-	mrs	x7, sctlr_el1
-	mrs	x8, esr_el3
+	/* Mask all interrupts and set AArch32 Supervisor mode */
+	movz	w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)
+
+	/* Switch EL3 exception vectors while the workaround is executing. */
+	adr	x9, workaround_bpiall_vbar1_runtime_exceptions
+
+	/* Setup SCTLR_EL1 with MMU off and I$ on */
+	ldr	x10, stub_sel1_sctlr
 
-	/* Preserve system registers in the workaround context */
-	stp	x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
-	stp	x6, x7, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
-	stp	x8, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+	/* Land at the S-EL1 workaround stub */
+	adr	x11, aarch32_stub
 
 	/*
 	 * Setting SCR_EL3 to all zeroes means that the NS, RW
 	 * and SMD bits are configured as expected.
 	 */
 	msr	scr_el3, xzr
-
-	/*
-	 * Reload system registers with the crafted values
-	 * in preparation for entry in S-EL1.
-	 */
-	msr	spsr_el3, x0
-	msr	vbar_el3, x1
-	msr	sctlr_el1, x2
-	msr	elr_el3, x3
+	msr	spsr_el3, x8
+	msr	vbar_el3, x9
+	msr	sctlr_el1, x10
+	msr	elr_el3, x11
 
 	eret
 	.endm
@@ -91,76 +101,31 @@
 	 */
 vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
 	b	sync_exception_sp_el0
+	nop	/* to force 8 byte alignment for the following stub */
+
 	/*
 	 * Since each vector table entry is 128 bytes, we can store the
 	 * stub context in the unused space to minimize memory footprint.
 	 */
-aarch32_stub_smc:
+stub_sel1_sctlr:
+	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+
+aarch32_stub:
 	.word	EMIT_BPIALL
-	.word	EMIT_MOV_R0_IMM(1)
 	.word	EMIT_SMC
-aarch32_stub_ctx_smc:
-	/* Mask all interrupts and set AArch32 Supervisor mode */
-	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
-	         SPSR_M_AARCH32 << SPSR_M_SHIFT | \
-	         MODE32_svc << MODE32_SHIFT)
 
-	/*
-	 * VBAR_EL3 points to vbar1 which is the vector table
-	 * used while the workaround is executing.
-	 */
-	.quad	workaround_bpiall_vbar1_runtime_exceptions
-
-	/* Setup SCTLR_EL1 with MMU off and I$ on */
-	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
-
-	/* ELR_EL3 is setup to point to the sync exception stub in AArch32 */
-	.quad	aarch32_stub_smc
 	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
 
 vector_entry workaround_bpiall_vbar0_irq_sp_el0
 	b	irq_sp_el0
-aarch32_stub_irq:
-	.word	EMIT_BPIALL
-	.word	EMIT_MOV_R0_IMM(2)
-	.word	EMIT_SMC
-aarch32_stub_ctx_irq:
-	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
-	         SPSR_M_AARCH32 << SPSR_M_SHIFT | \
-	         MODE32_svc << MODE32_SHIFT)
-	.quad	workaround_bpiall_vbar1_runtime_exceptions
-	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
-	.quad	aarch32_stub_irq
 	check_vector_size workaround_bpiall_vbar0_irq_sp_el0
 
 vector_entry workaround_bpiall_vbar0_fiq_sp_el0
 	b	fiq_sp_el0
-aarch32_stub_fiq:
-	.word	EMIT_BPIALL
-	.word	EMIT_MOV_R0_IMM(4)
-	.word	EMIT_SMC
-aarch32_stub_ctx_fiq:
-	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
-	         SPSR_M_AARCH32 << SPSR_M_SHIFT | \
-	         MODE32_svc << MODE32_SHIFT)
-	.quad	workaround_bpiall_vbar1_runtime_exceptions
-	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
-	.quad	aarch32_stub_fiq
 	check_vector_size workaround_bpiall_vbar0_fiq_sp_el0
 
 vector_entry workaround_bpiall_vbar0_serror_sp_el0
 	b	serror_sp_el0
-aarch32_stub_serror:
-	.word	EMIT_BPIALL
-	.word	EMIT_MOV_R0_IMM(8)
-	.word	EMIT_SMC
-aarch32_stub_ctx_serror:
-	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
-	         SPSR_M_AARCH32 << SPSR_M_SHIFT | \
-	         MODE32_svc << MODE32_SHIFT)
-	.quad	workaround_bpiall_vbar1_runtime_exceptions
-	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
-	.quad	aarch32_stub_serror
 	check_vector_size workaround_bpiall_vbar0_serror_sp_el0
 
 	/* ---------------------------------------------------------------------
@@ -188,19 +153,19 @@
 	 * ---------------------------------------------------------------------
 	 */
 vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
-	enter_workaround aarch32_stub_ctx_smc
+	enter_workaround 1
 	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64
 
 vector_entry workaround_bpiall_vbar0_irq_aarch64
-	enter_workaround aarch32_stub_ctx_irq
+	enter_workaround 2
 	check_vector_size workaround_bpiall_vbar0_irq_aarch64
 
 vector_entry workaround_bpiall_vbar0_fiq_aarch64
-	enter_workaround aarch32_stub_ctx_fiq
+	enter_workaround 4
 	check_vector_size workaround_bpiall_vbar0_fiq_aarch64
 
 vector_entry workaround_bpiall_vbar0_serror_aarch64
-	enter_workaround aarch32_stub_ctx_serror
+	enter_workaround 8
 	check_vector_size workaround_bpiall_vbar0_serror_aarch64
 
 	/* ---------------------------------------------------------------------
@@ -208,19 +173,19 @@
 	 * ---------------------------------------------------------------------
 	 */
 vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
-	enter_workaround aarch32_stub_ctx_smc
+	enter_workaround 1
 	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32
 
 vector_entry workaround_bpiall_vbar0_irq_aarch32
-	enter_workaround aarch32_stub_ctx_irq
+	enter_workaround 2
 	check_vector_size workaround_bpiall_vbar0_irq_aarch32
 
 vector_entry workaround_bpiall_vbar0_fiq_aarch32
-	enter_workaround aarch32_stub_ctx_fiq
+	enter_workaround 4
 	check_vector_size workaround_bpiall_vbar0_fiq_aarch32
 
 vector_entry workaround_bpiall_vbar0_serror_aarch32
-	enter_workaround aarch32_stub_ctx_serror
+	enter_workaround 8
 	check_vector_size workaround_bpiall_vbar0_serror_aarch32
 
 	/* ---------------------------------------------------------------------
@@ -297,31 +262,33 @@
 	 * ---------------------------------------------------------------------
 	 */
 vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
-	/* Restore register state from the workaround context */
-	ldp	x2, x3, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
-	ldp	x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
-	ldp	x6, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+	/*
+	 * w2 indicates which SEL1 stub was run and thus which original vector was used
+	 * w3-w6 contain saved system register state (esr_el3 in w3)
+	 * Restore LR and ELR_EL3 register state from the GP regs context
+	 */
+	ldp	x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 
 	/* Apply the restored system register state */
-	msr	scr_el3, x2
-	msr	spsr_el3, x3
-	msr	elr_el3, x4
-	msr	sctlr_el1, x5
-	msr	esr_el3, x6
+	msr	esr_el3, x3
+	msr	spsr_el3, x4
+	msr	scr_el3, x5
+	msr	sctlr_el1, x6
+	msr	elr_el3, x7
 
 	/*
 	 * Workaround is complete, so swap VBAR_EL3 to point
 	 * to workaround entry table in preparation for subsequent
 	 * Sync/IRQ/FIQ/SError exceptions.
 	 */
-	adr	x2, workaround_bpiall_vbar0_runtime_exceptions
-	msr	vbar_el3, x2
+	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	msr	vbar_el3, x0
 
 	/*
-	 * Restore all GP regs except x0 and x1.  The value in x0
+	 * Restore all GP regs except x2 and x3 (esr).  The value in x2
 	 * indicates the type of the original exception.
 	 */
-	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
 	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
 	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
 	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
@@ -336,37 +303,55 @@
 	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
 	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
 
+	/* Fast path Sync exceptions.  Static predictor will fall through. */
+	tbz	w2, #0, workaround_not_sync
+
 	/*
-	 * Each of these handlers will first restore x0 and x1 from
-	 * the context and the branch to the common implementation for
-	 * each of the exception types.
+	 * Check if SMC is coming from A64 state on #0
+	 * with W0 = SMCCC_ARCH_WORKAROUND_1
+	 *
+	 * This sequence evaluates as:
+	 *    (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing use of a single branch operation
 	 */
-	tbnz	x0, #1, workaround_bpiall_vbar1_irq
-	tbnz	x0, #2, workaround_bpiall_vbar1_fiq
-	tbnz	x0, #3, workaround_bpiall_vbar1_serror
-
-	/* Fallthrough case for Sync exception */
-	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_1
+	cmp	w0, w2
+	mov_imm	w2, ESR_EL3_A64_SMC0
+	ccmp	w3, w2, #0, eq
+	/* Static predictor will predict a fall through */
+	bne	1f
+	eret
+1:
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	sync_exception_aarch64
 	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
 
 vector_entry workaround_bpiall_vbar1_irq_aarch32
 	b	report_unhandled_interrupt
-workaround_bpiall_vbar1_irq:
-	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+
+	/*
+	 * Post-workaround fan-out for non-sync exceptions
+	 */
+workaround_not_sync:
+	tbnz	w2, #3, workaround_bpiall_vbar1_serror
+	tbnz	w2, #2, workaround_bpiall_vbar1_fiq
+	/* IRQ */
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	irq_aarch64
+
+workaround_bpiall_vbar1_fiq:
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	b	fiq_aarch64
+
+workaround_bpiall_vbar1_serror:
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	b	serror_aarch64
 	check_vector_size workaround_bpiall_vbar1_irq_aarch32
 
 vector_entry workaround_bpiall_vbar1_fiq_aarch32
 	b	report_unhandled_interrupt
-workaround_bpiall_vbar1_fiq:
-	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	b	fiq_aarch64
 	check_vector_size workaround_bpiall_vbar1_fiq_aarch32
 
 vector_entry workaround_bpiall_vbar1_serror_aarch32
 	b	report_unhandled_exception
-workaround_bpiall_vbar1_serror:
-	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	b	serror_aarch64
 	check_vector_size workaround_bpiall_vbar1_serror_aarch32
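
The reworked flow threads a one-hot vector id through w2 (`enter_workaround 1/2/4/8` above), and the vbar1 sync handler fans out on those bits after the workaround completes. A C rendering of that fan-out, with stand-in stub handlers:

```c
/*
 * C rendering of the post-workaround fan-out: w2 carries a one-hot
 * vector id (1 = sync, 2 = IRQ, 4 = FIQ, 8 = SError).  Sync is the
 * predicted fall-through fast path; the rest are tested as in the asm
 * (tbz #0, then tbnz #3, tbnz #2).  The handlers are stand-in stubs.
 */
#include <stdio.h>

static void sync_exception_aarch64(void)	{ puts("sync"); }
static void irq_aarch64(void)			{ puts("irq"); }
static void fiq_aarch64(void)			{ puts("fiq"); }
static void serror_aarch64(void)		{ puts("serror"); }

static void fan_out(unsigned int w2)
{
	if (w2 & 1u) {			/* tbz w2, #0, workaround_not_sync */
		sync_exception_aarch64();	/* after the SMC fast-path check */
		return;
	}
	if (w2 & 8u)			/* tbnz w2, #3 */
		serror_aarch64();
	else if (w2 & 4u)		/* tbnz w2, #2 */
		fiq_aarch64();
	else				/* remaining case: IRQ */
		irq_aarch64();
}

int main(void)
{
	fan_out(1);	/* enter_workaround 1: sync */
	fan_out(2);	/* enter_workaround 2: irq */
	return 0;
}
```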
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
index f478148..b24b620 100644
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
@@ -1,26 +1,60 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
+#include <arm_arch_svc.h>
 #include <asm_macros.S>
 #include <context.h>
 
 	.globl	workaround_mmu_runtime_exceptions
 
+#define ESR_EL3_A64_SMC0	0x5e000000
+
 vector_base workaround_mmu_runtime_exceptions
 
-	.macro	apply_workaround
+	.macro	apply_workaround _is_sync_exception
 	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	mrs	x0, sctlr_el3
+	mrs	x1, sctlr_el3
 	/* Disable MMU */
-	bic	x1, x0, #SCTLR_M_BIT
+	bic	x1, x1, #SCTLR_M_BIT
 	msr	sctlr_el3, x1
 	isb
-	/* Restore MMU config */
-	msr	sctlr_el3, x0
+	/* Enable MMU */
+	orr	x1, x1, #SCTLR_M_BIT
+	msr	sctlr_el3, x1
+	/*
+	 * Defer ISB to avoid synchronizing twice in case we hit
+	 * the workaround SMC call which will implicitly synchronize
+	 * because of the ERET instruction.
+	 */
+
+	/*
+	 * Ensure SMC is coming from A64 state on #0
+	 * with W0 = SMCCC_ARCH_WORKAROUND_1
+	 *
+	 * This sequence evaluates as:
+	 *    (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing use of a single branch operation
+	 */
+	.if \_is_sync_exception
+		orr	w1, wzr, #SMCCC_ARCH_WORKAROUND_1
+		cmp	w0, w1
+		mrs	x0, esr_el3
+		mov_imm	w1, ESR_EL3_A64_SMC0
+		ccmp	w0, w1, #0, eq
+		/* Static predictor will predict a fall through */
+		bne	1f
+		eret
+1:
+	.endif
+
+	/*
+	 * Synchronize now to enable the MMU.  This is required
+	 * to ensure the load pair below reads the data stored earlier.
+	 */
 	isb
 	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
 	.endm
@@ -70,22 +104,22 @@
 	 * ---------------------------------------------------------------------
 	 */
 vector_entry workaround_mmu_sync_exception_aarch64
-	apply_workaround
+	apply_workaround _is_sync_exception=1
 	b	sync_exception_aarch64
 	check_vector_size workaround_mmu_sync_exception_aarch64
 
 vector_entry workaround_mmu_irq_aarch64
-	apply_workaround
+	apply_workaround _is_sync_exception=0
 	b	irq_aarch64
 	check_vector_size workaround_mmu_irq_aarch64
 
 vector_entry workaround_mmu_fiq_aarch64
-	apply_workaround
+	apply_workaround _is_sync_exception=0
 	b	fiq_aarch64
 	check_vector_size workaround_mmu_fiq_aarch64
 
 vector_entry workaround_mmu_serror_aarch64
-	apply_workaround
+	apply_workaround _is_sync_exception=0
 	b	serror_aarch64
 	check_vector_size workaround_mmu_serror_aarch64
 
@@ -94,21 +128,21 @@
 	 * ---------------------------------------------------------------------
 	 */
 vector_entry workaround_mmu_sync_exception_aarch32
-	apply_workaround
+	apply_workaround _is_sync_exception=1
 	b	sync_exception_aarch32
 	check_vector_size workaround_mmu_sync_exception_aarch32
 
 vector_entry workaround_mmu_irq_aarch32
-	apply_workaround
+	apply_workaround _is_sync_exception=0
 	b	irq_aarch32
 	check_vector_size workaround_mmu_irq_aarch32
 
 vector_entry workaround_mmu_fiq_aarch32
-	apply_workaround
+	apply_workaround _is_sync_exception=0
 	b	fiq_aarch32
 	check_vector_size workaround_mmu_fiq_aarch32
 
 vector_entry workaround_mmu_serror_aarch32
-	apply_workaround
+	apply_workaround _is_sync_exception=0
 	b	serror_aarch32
 	check_vector_size workaround_mmu_serror_aarch32
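
Both workaround files use the same cmp/ccmp trick to take the early ERET with a single conditional branch. Its C equivalent, for reference:

```c
/*
 * C equivalent of the cmp/ccmp fast path: the early ERET is taken only
 * when the trap is an SMC from AArch64 with immediate #0 and
 * w0 == SMCCC_ARCH_WORKAROUND_1.  ESR_EL3_A64_SMC0 (0x5e000000)
 * encodes EC = SMC-from-AArch64 with the IL bit set and ISS = 0.
 */
#include <stdint.h>

#define SMCCC_ARCH_WORKAROUND_1	0x80008000u
#define ESR_EL3_A64_SMC0	0x5e000000u

int is_workaround_fast_path(uint32_t w0, uint32_t esr_el3)
{
	/* cmp w0, w1; ccmp <esr>, w1, #0, eq; bne 1f; eret */
	return (w0 == SMCCC_ARCH_WORKAROUND_1) &&
	       (esr_el3 == ESR_EL3_A64_SMC0);
}
```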
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index effc5bd..68cc4b3 100644
--- a/lib/extensions/amu/aarch32/amu.c
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -5,6 +5,7 @@
  */
 
 #include <amu.h>
+#include <amu_private.h>
 #include <arch.h>
 #include <arch_helpers.h>
 #include <platform.h>
@@ -14,21 +15,26 @@
 
 struct amu_ctx {
 	uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
+	uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
 };
 
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
 
-void amu_enable(int el2_unused)
+int amu_supported(void)
 {
 	uint64_t features;
 
 	features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
-	if ((features & ID_PFR0_AMU_MASK) != 1)
+	return (features & ID_PFR0_AMU_MASK) == 1;
+}
+
+void amu_enable(int el2_unused)
+{
+	if (!amu_supported())
 		return;
 
 	if (el2_unused) {
 		uint64_t v;
-
 		/*
 		 * Non-secure accesses from EL0 or EL1 to the Activity Monitor
 		 * registers do not trap to EL2.
@@ -40,15 +46,64 @@
 
 	/* Enable group 0 counters */
 	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+
+	/* Enable group 1 counters */
+	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
+}
+
+/* Read the group 0 counter identified by the given `idx`. */
+uint64_t amu_group0_cnt_read(int idx)
+{
+	assert(amu_supported());
+	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+	return amu_group0_cnt_read_internal(idx);
+}
+
+/* Write the group 0 counter identified by the given `idx` with `val`. */
+void amu_group0_cnt_write(int idx, uint64_t val)
+{
+	assert(amu_supported());
+	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+	amu_group0_cnt_write_internal(idx, val);
+	isb();
+}
+
+/* Read the group 1 counter identified by the given `idx`. */
+uint64_t amu_group1_cnt_read(int idx)
+{
+	assert(amu_supported());
+	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+	return amu_group1_cnt_read_internal(idx);
+}
+
+/* Write the group 1 counter identified by the given `idx` with `val`. */
+void amu_group1_cnt_write(int idx, uint64_t val)
+{
+	assert(amu_supported());
+	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+	amu_group1_cnt_write_internal(idx, val);
+	isb();
+}
+
+void amu_group1_set_evtype(int idx, unsigned int val)
+{
+	assert(amu_supported());
+	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+	amu_group1_set_evtype_internal(idx, val);
+	isb();
 }
 
 static void *amu_context_save(const void *arg)
 {
 	struct amu_ctx *ctx;
-	uint64_t features;
+	int i;
 
-	features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
-	if ((features & ID_PFR0_AMU_MASK) != 1)
+	if (!amu_supported())
 		return (void *)-1;
 
 	ctx = &amu_ctxs[plat_my_core_pos()];
@@ -61,12 +116,14 @@
 	 * counter values from the future via the memory mapped view.
 	 */
 	write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
+	write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
 	isb();
 
-	ctx->group0_cnts[0] = read64_amevcntr00();
-	ctx->group0_cnts[1] = read64_amevcntr01();
-	ctx->group0_cnts[2] = read64_amevcntr02();
-	ctx->group0_cnts[3] = read64_amevcntr03();
+	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
+
+	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
+		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
 
 	return 0;
 }
@@ -75,6 +132,7 @@
 {
 	struct amu_ctx *ctx;
 	uint64_t features;
+	int i;
 
 	features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
 	if ((features & ID_PFR0_AMU_MASK) != 1)
@@ -86,19 +144,16 @@
 	assert(read_amcntenset0() == 0);
 
 	/* Restore group 0 counters */
-	if (AMU_GROUP0_COUNTERS_MASK & (1U << 0))
-		write64_amevcntr00(ctx->group0_cnts[0]);
-	if (AMU_GROUP0_COUNTERS_MASK & (1U << 1))
-		write64_amevcntr01(ctx->group0_cnts[1]);
-	if (AMU_GROUP0_COUNTERS_MASK & (1U << 2))
-		write64_amevcntr02(ctx->group0_cnts[2]);
-	if (AMU_GROUP0_COUNTERS_MASK & (1U << 3))
-		write64_amevcntr03(ctx->group0_cnts[3]);
-	isb();
+	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
+	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
+		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
 
 	/* Enable group 0 counters */
 	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
 
+	/* Enable group 1 counters */
+	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
 	return 0;
 }
 
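
A usage sketch for the new AArch32 group 1 interface, mirroring the AArch64 one. The event number 0x11 is purely illustrative (real AMU event ids are implementation defined), and the prototypes are repeated only to keep the sketch self-contained:

```c
/* Usage sketch for the group 1 counter API introduced above. */
#include <stdint.h>

int amu_supported(void);
void amu_group1_set_evtype(int idx, unsigned int val);
void amu_group1_cnt_write(int idx, uint64_t val);
uint64_t amu_group1_cnt_read(int idx);

uint64_t sample_group1_counter0(void)
{
	if (!amu_supported())
		return 0;

	amu_group1_set_evtype(0, 0x11);	/* program the event (illustrative) */
	amu_group1_cnt_write(0, 0);	/* start counting from zero */
	/* ... run the workload of interest ... */
	return amu_group1_cnt_read(0);
}
```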
diff --git a/lib/extensions/amu/aarch32/amu_helpers.S b/lib/extensions/amu/aarch32/amu_helpers.S
new file mode 100644
index 0000000..84dca04
--- /dev/null
+++ b/lib/extensions/amu/aarch32/amu_helpers.S
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert_macros.S>
+#include <asm_macros.S>
+
+	.globl	amu_group0_cnt_read_internal
+	.globl	amu_group0_cnt_write_internal
+	.globl	amu_group1_cnt_read_internal
+	.globl	amu_group1_cnt_write_internal
+	.globl	amu_group1_set_evtype_internal
+
+/*
+ * uint64_t amu_group0_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `r0` and `r1`.
+ */
+func amu_group0_cnt_read_internal
+#if ENABLE_ASSERTIONS
+	/* `idx` should be in the range [0, 3] */
+	mov	r1, r0
+	lsr	r1, r1, #2
+	cmp	r1, #0
+	ASM_ASSERT(eq)
+#endif
+
+	/*
+	 * Given `idx` calculate address of ldcopr16/bx lr instruction pair
+	 * in the table below.
+	 */
+	adr	r1, 1f
+	lsl	r0, r0, #3	/* each ldcopr16/bx lr sequence is 8 bytes */
+	add	r1, r1, r0
+	bx	r1
+1:
+	ldcopr16	r0, r1, AMEVCNTR00	/* index 0 */
+	bx		lr
+	ldcopr16	r0, r1, AMEVCNTR01	/* index 1 */
+	bx		lr
+	ldcopr16	r0, r1, AMEVCNTR02	/* index 2 */
+	bx		lr
+	ldcopr16	r0, r1, AMEVCNTR03	/* index 3 */
+	bx		lr
+endfunc amu_group0_cnt_read_internal
+
+/*
+ * void amu_group0_cnt_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func amu_group0_cnt_write_internal
+#if ENABLE_ASSERTIONS
+	/* `idx` should be in the range [0, 3] */
+	mov	r2, r0
+	lsr	r2, r2, #2
+	cmp	r2, #0
+	ASM_ASSERT(eq)
+#endif
+
+	/*
+	 * Given `idx` calculate address of stcopr16/bx lr instruction pair
+	 * in the table below.
+	 */
+	adr	r2, 1f
+	lsl	r0, r0, #3	/* each stcopr16/bx lr sequence is 8 bytes */
+	add	r2, r2, r0
+	bx	r2
+
+1:
+	stcopr16	r0, r1, AMEVCNTR00	/* index 0 */
+	bx		lr
+	stcopr16	r0, r1, AMEVCNTR01	/* index 1 */
+	bx		lr
+	stcopr16	r0, r1, AMEVCNTR02	/* index 2 */
+	bx		lr
+	stcopr16	r0, r1, AMEVCNTR03	/* index 3 */
+	bx		lr
+endfunc amu_group0_cnt_write_internal
+
+/*
+ * uint64_t amu_group1_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `r0` and `r1`.
+ */
+func amu_group1_cnt_read_internal
+#if ENABLE_ASSERTIONS
+	/* `idx` should be in the range [0, 15] */
+	mov	r2, r0
+	lsr	r2, r2, #4
+	cmp	r2, #0
+	ASM_ASSERT(eq)
+#endif
+
+	/*
+	 * Given `idx` calculate address of ldcopr16/bx lr instruction pair
+	 * in the table below.
+	 */
+	adr	r1, 1f
+	lsl	r0, r0, #3	/* each ldcopr16/bx lr sequence is 8 bytes */
+	add	r1, r1, r0
+	bx	r1
+
+1:
+	ldcopr16	r0,r1, AMEVCNTR10	/* index 0 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR11	/* index 1 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR12	/* index 2 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR13	/* index 3 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR14	/* index 4 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR15	/* index 5 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR16	/* index 6 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR17	/* index 7 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR18	/* index 8 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR19	/* index 9 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR1A	/* index 10 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR1B	/* index 11 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR1C	/* index 12 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR1D	/* index 13 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR1E	/* index 14 */
+	bx	lr
+	ldcopr16	r0,r1, AMEVCNTR1F	/* index 15 */
+	bx	lr
+endfunc amu_group1_cnt_read_internal
+
+/*
+ * void amu_group1_cnt_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func amu_group1_cnt_write_internal
+#if ENABLE_ASSERTIONS
+	/* `idx` should be in the range [0, 15] */
+	mov	r2, r0
+	lsr	r2, r2, #4
+	cmp	r2, #0
+	ASM_ASSERT(eq)
+#endif
+
+	/*
+	 * Given `idx` calculate address of stcopr16/bx lr instruction pair
+	 * in the table below.
+	 */
+	adr	r2, 1f
+	lsl	r0, r0, #3	/* each stcopr16/bx lr sequence is 8 bytes */
+	add	r2, r2, r0
+	bx	r2
+
+1:
+	stcopr16	r0,r1,	AMEVCNTR10	/* index 0 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR11	/* index 1 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR12	/* index 2 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR13	/* index 3 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR14	/* index 4 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR15	/* index 5 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR16	/* index 6 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR17	/* index 7 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR18	/* index 8 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR19	/* index 9 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR1A	/* index 10 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR1B	/* index 11 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR1C	/* index 12 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR1D	/* index 13 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR1E	/* index 14 */
+	bx		lr
+	stcopr16	r0,r1,	AMEVCNTR1F	/* index 15 */
+	bx		lr
+endfunc amu_group1_cnt_write_internal
+
+/*
+ * void amu_group1_set_evtype_internal(int idx, unsigned int val);
+ *
+ * Program the AMU event type register indexed by `idx`
+ * with the value `val`.
+ */
+func amu_group1_set_evtype_internal
+#if ENABLE_ASSERTIONS
+	/* `idx` should be in the range [0, 15] */
+	mov	r2, r0
+	lsr	r2, r2, #4
+	cmp	r2, #0
+	ASM_ASSERT(eq)
+
+	/* `val` should be in the range [0, 65535] */
+	mov	r2, r1
+	lsr	r2, r2, #16
+	cmp	r2, #0
+	ASM_ASSERT(eq)
+#endif
+
+	/*
+	 * Given `idx` calculate address of stcopr/bx lr instruction pair
+	 * in the table below.
+	 */
+	adr	r2, 1f
+	lsl	r0, r0, #3	/* each stcopr/bx lr sequence is 8 bytes */
+	add	r2, r2, r0
+	bx	r2
+
+1:
+	stcopr	r0,	AMEVTYPER10 /* index 0 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER11 /* index 1 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER12 /* index 2 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER13 /* index 3 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER14 /* index 4 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER15 /* index 5 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER16 /* index 6 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER17 /* index 7 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER18 /* index 8 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER19 /* index 9 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER1A /* index 10 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER1B /* index 11 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER1C /* index 12 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER1D /* index 13 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER1E /* index 14 */
+	bx	lr
+	stcopr	r0,	AMEVTYPER1F /* index 15 */
+	bx	lr
+endfunc amu_group1_set_evtype_internal
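
What the computed branches in this file amount to, expressed in C: each table slot is exactly one `ldcopr16`/`stcopr16` plus one `bx lr` (8 bytes), so the code jumps to `table + (idx << 3)`. The jump table exists because a coprocessor register name must be an instruction immediate; in C the same dispatch is an ordinary switch. `read_amevcntr1()` below is a hypothetical stand-in for the MRRC accessor:

```c
/* C sketch of the 8-byte-per-entry jump-table dispatch used above. */
#include <stdint.h>

uint64_t read_amevcntr1(int idx);	/* hypothetical stand-in accessor */

uint64_t amu_group1_cnt_read_sketch(int idx)
{
	switch (idx) {
	case 0:  return read_amevcntr1(0);	/* ldcopr16 r0,r1, AMEVCNTR10 */
	case 1:  return read_amevcntr1(1);	/* ldcopr16 r0,r1, AMEVCNTR11 */
	/* ... one case per counter, up to ... */
	case 15: return read_amevcntr1(15);	/* ldcopr16 r0,r1, AMEVCNTR1F */
	default: return 0;
	}
}
```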
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index d7645a9..7d39f35 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -172,7 +172,6 @@
 	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
 		if (AMU_GROUP1_COUNTERS_MASK & (1U << i))
 			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
-	isb();
 
 	/* Restore group 0/1 counter configuration */
 	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
diff --git a/lib/psci/psci_main.c b/lib/psci/psci_main.c
index 8e41cf0..88cf5cb 100644
--- a/lib/psci/psci_main.c
+++ b/lib/psci/psci_main.c
@@ -1,11 +1,12 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <arch_helpers.h>
+#include <arm_arch_svc.h>
 #include <assert.h>
 #include <debug.h>
 #include <platform.h>
@@ -322,6 +323,9 @@
 {
 	unsigned int local_caps = psci_caps;
 
+	if (psci_fid == SMCCC_VERSION)
+		return PSCI_E_SUCCESS;
+
 	/* Check if it is a 64 bit function */
 	if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
 		local_caps &= PSCI_CAP_64BIT_MASK;
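
Caller-side view of this hunk: per SMCCC v1.1, discovery starts with `PSCI_FEATURES(SMCCC_VERSION)`, which now reports success. A sketch, where `smc32()` is a hypothetical conduit helper (not a TF API) and `PSCI_FEATURES` (0x8400000a) and `PSCI_E_SUCCESS` (0) carry their usual PSCI values:

```c
/* Normal-world discovery sketch enabled by the PSCI change above. */
#include <stdint.h>

int32_t smc32(uint32_t fid, uint32_t arg);	/* hypothetical conduit */

#define PSCI_FEATURES	0x8400000au	/* assumption: standard PSCI fid */
#define SMCCC_VERSION	0x80000000u
#define PSCI_E_SUCCESS	0

int32_t probe_smccc_version(void)
{
	/* Only issue SMCCC_VERSION once PSCI_FEATURES reports support */
	if (smc32(PSCI_FEATURES, SMCCC_VERSION) != PSCI_E_SUCCESS)
		return 0x10000;		/* treat firmware as SMCCC v1.0 */

	return smc32(SMCCC_VERSION, 0);	/* 0x10001 with this patch */
}
```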
diff --git a/plat/hisilicon/hikey/platform.mk b/plat/hisilicon/hikey/platform.mk
index 4db21e6..d43b20b 100644
--- a/plat/hisilicon/hikey/platform.mk
+++ b/plat/hisilicon/hikey/platform.mk
@@ -122,4 +122,6 @@
 ERRATA_A53_843419		:=	1
 ERRATA_A53_855873		:=	1
 
+WORKAROUND_CVE_2017_5715	:=	0
+
 FIP_ALIGN			:=	512
diff --git a/plat/hisilicon/poplar/platform.mk b/plat/hisilicon/poplar/platform.mk
index 2dbbac6..d53e062 100644
--- a/plat/hisilicon/poplar/platform.mk
+++ b/plat/hisilicon/poplar/platform.mk
@@ -29,6 +29,8 @@
 ERRATA_A53_843419		:= 1
 ENABLE_SVE_FOR_NS		:= 0
 
+WORKAROUND_CVE_2017_5715	:= 0
+
 ARM_GIC_ARCH			:= 2
 $(eval $(call add_define,ARM_GIC_ARCH))
 
diff --git a/plat/mediatek/mt6795/platform.mk b/plat/mediatek/mt6795/platform.mk
index 8230067..1bdf30a 100644
--- a/plat/mediatek/mt6795/platform.mk
+++ b/plat/mediatek/mt6795/platform.mk
@@ -61,6 +61,8 @@
 ERRATA_A53_826319	:=	1
 ERRATA_A53_836870	:=	1
 
+WORKAROUND_CVE_2017_5715	:=	0
+
 # indicate the reset vector address can be programmed
 PROGRAMMABLE_RESET_ADDRESS	:=	1
 
diff --git a/plat/rockchip/rk3328/platform.mk b/plat/rockchip/rk3328/platform.mk
index 6e4d5b4..f0fd36f 100644
--- a/plat/rockchip/rk3328/platform.mk
+++ b/plat/rockchip/rk3328/platform.mk
@@ -58,3 +58,5 @@
 
 # Do not enable SVE
 ENABLE_SVE_FOR_NS	:=	0
+
+WORKAROUND_CVE_2017_5715	:=	0
diff --git a/plat/rockchip/rk3368/platform.mk b/plat/rockchip/rk3368/platform.mk
index ad204e9..7ecb21a 100644
--- a/plat/rockchip/rk3368/platform.mk
+++ b/plat/rockchip/rk3368/platform.mk
@@ -57,3 +57,5 @@
 
 # Do not enable SVE
 ENABLE_SVE_FOR_NS	:=	0
+
+WORKAROUND_CVE_2017_5715	:=	0
diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu.c b/plat/rockchip/rk3399/drivers/pmu/pmu.c
index 51101a4..f4893ef 100644
--- a/plat/rockchip/rk3399/drivers/pmu/pmu.c
+++ b/plat/rockchip/rk3399/drivers/pmu/pmu.c
@@ -33,7 +33,7 @@
 
 static uint32_t cpu_warm_boot_addr;
 static char store_sram[SRAM_BIN_LIMIT + SRAM_TEXT_LIMIT + SRAM_DATA_LIMIT];
-static uint32_t store_cru[CRU_SDIO0_CON1 / 4];
+static uint32_t store_cru[CRU_SDIO0_CON1 / 4 + 1];
 static uint32_t store_usbphy0[7];
 static uint32_t store_usbphy1[7];
 static uint32_t store_grf_io_vsel;
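
Why the `+ 1`: judging from the fix, the CRU save/restore code indexes `store_cru` up to and including `CRU_SDIO0_CON1 / 4`, so the old size overflowed by one element. A sketch with an illustrative placeholder value (not the real rk3399 register offset):

```c
/* Off-by-one illustration for the store_cru sizing fix above. */
#define CRU_SDIO0_CON1	0x58c		/* placeholder value, illustrative */

static unsigned int store_cru[CRU_SDIO0_CON1 / 4 + 1];

void save_cru(const volatile unsigned int *cru_base)
{
	/* inclusive upper bound: needs CRU_SDIO0_CON1 / 4 + 1 slots */
	for (unsigned int i = 0; i <= CRU_SDIO0_CON1 / 4; i++)
		store_cru[i] = cru_base[i];
}
```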
diff --git a/plat/rpi3/platform.mk b/plat/rpi3/platform.mk
index 821f801..e201cee 100644
--- a/plat/rpi3/platform.mk
+++ b/plat/rpi3/platform.mk
@@ -64,6 +64,8 @@
 ERRATA_A53_843419		:= 1
 ERRATA_A53_855873		:= 1
 
+WORKAROUND_CVE_2017_5715	:= 0
+
 # Disable the PSCI platform compatibility layer by default
 ENABLE_PLAT_COMPAT		:= 0
 
diff --git a/plat/socionext/uniphier/include/platform_def.h b/plat/socionext/uniphier/include/platform_def.h
index 546670e..301aa14 100644
--- a/plat/socionext/uniphier/include/platform_def.h
+++ b/plat/socionext/uniphier/include/platform_def.h
@@ -9,6 +9,7 @@
 
 #include <common_def.h>
 #include <tbbr/tbbr_img_def.h>
+#include <utils_def.h>
 
 #define PLATFORM_STACK_SIZE		0x1000
 
@@ -27,26 +28,28 @@
 #define PLAT_MAX_OFF_STATE		2
 #define PLAT_MAX_RET_STATE		1
 
-#define UNIPHIER_SEC_DRAM_BASE		0x80000000
-#define UNIPHIER_SEC_DRAM_LIMIT		0x82000000
+#define BL2_BASE			ULL(0x80000000)
+#define BL2_LIMIT			ULL(0x80080000)
+
+/* 0x80080000-0x81000000: reserved for DSP */
+
+#define UNIPHIER_SEC_DRAM_BASE		0x81000000ULL
+#define UNIPHIER_SEC_DRAM_LIMIT		0x82000000ULL
 #define UNIPHIER_SEC_DRAM_SIZE		((UNIPHIER_SEC_DRAM_LIMIT) - \
 					 (UNIPHIER_SEC_DRAM_BASE))
 
-#define BL2_BASE			(UNIPHIER_SEC_DRAM_BASE)
-#define BL2_LIMIT			((BL2_BASE) + 0x00020000)
+#define BL31_BASE			ULL(0x81000000)
+#define BL31_LIMIT			ULL(0x81080000)
 
-#define BL31_BASE			(BL2_LIMIT)
-#define BL31_LIMIT			((BL31_BASE) + 0x00080000)
-
-#define BL32_BASE			(BL31_LIMIT)
-#define BL32_LIMIT			(UNIPHIER_SEC_DRAM_LIMIT)
+#define BL32_BASE			ULL(0x81080000)
+#define BL32_LIMIT			ULL(0x81180000)
 
 #define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 32)
 #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 32)
 
 #define PLAT_XLAT_TABLES_DYNAMIC	1
 #define MAX_XLAT_TABLES			7
-#define MAX_MMAP_REGIONS		6
+#define MAX_MMAP_REGIONS		7
 
 #define MAX_IO_HANDLES			2
 #define MAX_IO_DEVICES			2
diff --git a/plat/socionext/uniphier/uniphier_bl2_setup.c b/plat/socionext/uniphier/uniphier_bl2_setup.c
index 9bf866a..54b30e5 100644
--- a/plat/socionext/uniphier/uniphier_bl2_setup.c
+++ b/plat/socionext/uniphier/uniphier_bl2_setup.c
@@ -19,6 +19,9 @@
 
 #include "uniphier.h"
 
+#define BL2_END			(unsigned long)(&__BL2_END__)
+#define BL2_SIZE		((BL2_END) - (BL2_BASE))
+
 static int uniphier_bl2_kick_scp;
 
 void bl2_el3_early_platform_setup(u_register_t x0, u_register_t x1,
@@ -28,6 +31,9 @@
 }
 
 static const struct mmap_region uniphier_bl2_mmap[] = {
+	/* for BL31, BL32 */
+	MAP_REGION_FLAT(UNIPHIER_SEC_DRAM_BASE, UNIPHIER_SEC_DRAM_SIZE,
+			MT_MEMORY | MT_RW | MT_SECURE),
 	/* for SCP, BL33 */
 	MAP_REGION_FLAT(UNIPHIER_NS_DRAM_BASE, UNIPHIER_NS_DRAM_SIZE,
 			MT_MEMORY | MT_RW | MT_NS),
@@ -40,8 +46,7 @@
 	int skip_scp = 0;
 	int ret;
 
-	uniphier_mmap_setup(UNIPHIER_SEC_DRAM_BASE, UNIPHIER_SEC_DRAM_SIZE,
-			    uniphier_bl2_mmap);
+	uniphier_mmap_setup(BL2_BASE, BL2_SIZE, uniphier_bl2_mmap);
 	enable_mmu_el3(0);
 
 	soc = uniphier_get_soc_id();
diff --git a/plat/socionext/uniphier/uniphier_io_storage.c b/plat/socionext/uniphier/uniphier_io_storage.c
index bc31350..ed1f6fa 100644
--- a/plat/socionext/uniphier/uniphier_io_storage.c
+++ b/plat/socionext/uniphier/uniphier_io_storage.c
@@ -18,8 +18,11 @@
 
 #include "uniphier.h"
 
-#define UNIPHIER_ROM_REGION_BASE	0x00000000
-#define UNIPHIER_ROM_REGION_SIZE	0x10000000
+#define UNIPHIER_ROM_REGION_BASE	0x00000000ULL
+#define UNIPHIER_ROM_REGION_SIZE	0x10000000ULL
+
+#define UNIPHIER_OCM_REGION_BASE	0x30000000ULL
+#define UNIPHIER_OCM_REGION_SIZE	0x00040000ULL
 
 static const io_dev_connector_t *uniphier_fip_dev_con;
 static uintptr_t uniphier_fip_dev_handle;
@@ -271,6 +274,18 @@
 	if (ret)
 		return ret;
 
+	/*
+	 * on-chip SRAM region: should be DEVICE attribute because the USB
+	 * load functions provided by the ROM use this memory region as a work
+	 * area, but do not cater to cache coherency.
+	 */
+	ret = mmap_add_dynamic_region(UNIPHIER_OCM_REGION_BASE,
+				      UNIPHIER_OCM_REGION_BASE,
+				      UNIPHIER_OCM_REGION_SIZE,
+				      MT_DEVICE | MT_RW | MT_SECURE);
+	if (ret)
+		return ret;
+
 	ret = uniphier_usb_init(soc_id, &block_dev_spec);
 	if (ret)
 		return ret;
diff --git a/plat/socionext/uniphier/uniphier_xlat_setup.c b/plat/socionext/uniphier/uniphier_xlat_setup.c
index 6532c49..3112ecd 100644
--- a/plat/socionext/uniphier/uniphier_xlat_setup.c
+++ b/plat/socionext/uniphier/uniphier_xlat_setup.c
@@ -8,11 +8,8 @@
 #include <platform_def.h>
 #include <xlat_tables_v2.h>
 
-#define UNIPHIER_OCM_REGION_BASE	0x30000000
-#define UNIPHIER_OCM_REGION_SIZE	0x00040000
-
-#define UNIPHIER_REG_REGION_BASE	0x50000000
-#define UNIPHIER_REG_REGION_SIZE	0x20000000
+#define UNIPHIER_REG_REGION_BASE	0x50000000ULL
+#define UNIPHIER_REG_REGION_SIZE	0x20000000ULL
 
 void uniphier_mmap_setup(uintptr_t total_base, size_t total_size,
 			 const struct mmap_region *mmap)
@@ -37,15 +34,6 @@
 			BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
 			MT_DEVICE | MT_RW | MT_SECURE);
 
-	/*
-	 * on-chip SRAM region: should be DEVICE attribute because the USB
-	 * load functions provided by the ROM use this memory region as a work
-	 * area, but do not cater to cache coherency.
-	 */
-	mmap_add_region(UNIPHIER_OCM_REGION_BASE, UNIPHIER_OCM_REGION_BASE,
-			UNIPHIER_OCM_REGION_SIZE,
-			MT_DEVICE | MT_RW | MT_SECURE);
-
 	/* register region */
 	mmap_add_region(UNIPHIER_REG_REGION_BASE, UNIPHIER_REG_REGION_BASE,
 			UNIPHIER_REG_REGION_SIZE,
diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk
index bdd194b..bddf305 100644
--- a/plat/xilinx/zynqmp/platform.mk
+++ b/plat/xilinx/zynqmp/platform.mk
@@ -14,6 +14,8 @@
 # Do not enable SVE
 ENABLE_SVE_FOR_NS	:= 0
 
+WORKAROUND_CVE_2017_5715	:=	0
+
 ifdef ZYNQMP_ATF_MEM_BASE
     $(eval $(call add_define,ZYNQMP_ATF_MEM_BASE))
 
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
new file mode 100644
index 0000000..eedac86
--- /dev/null
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <runtime_svc.h>
+#include <smcc.h>
+#include <smcc_helpers.h>
+
+static int32_t smccc_version(void)
+{
+	return MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION);
+}
+
+static int32_t smccc_arch_features(u_register_t arg)
+{
+	switch (arg) {
+	case SMCCC_VERSION:
+	case SMCCC_ARCH_FEATURES:
+		return SMC_OK;
+#if WORKAROUND_CVE_2017_5715
+	case SMCCC_ARCH_WORKAROUND_1:
+		return SMC_OK;
+#endif
+	default:
+		return SMC_UNK;
+	}
+}
+
+/*
+ * Top-level Arm Architectural Service SMC handler.
+ */
+uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid,
+	u_register_t x1,
+	u_register_t x2,
+	u_register_t x3,
+	u_register_t x4,
+	void *cookie,
+	void *handle,
+	u_register_t flags)
+{
+	switch (smc_fid) {
+	case SMCCC_VERSION:
+		SMC_RET1(handle, smccc_version());
+	case SMCCC_ARCH_FEATURES:
+		SMC_RET1(handle, smccc_arch_features(x1));
+#if WORKAROUND_CVE_2017_5715
+	case SMCCC_ARCH_WORKAROUND_1:
+		/*
+		 * The workaround has already been applied on affected PEs
+		 * during entry to EL3.  On unaffected PEs, this function
+		 * has no effect.
+		 */
+		SMC_RET0(handle);
+#endif
+	default:
+		WARN("Unimplemented Arm Architecture Service Call: 0x%x \n",
+			smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+}
+
+/* Register Arm Architectural Service calls as a runtime service */
+DECLARE_RT_SVC(
+		arm_arch_svc,
+		OEN_ARM_START,
+		OEN_ARM_END,
+		SMC_TYPE_FAST,
+		NULL,
+		arm_arch_svc_smc_handler
+);
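
Expected normal-world usage of the service registered above, once SMCCC v1.1 has been discovered via `PSCI_FEATURES` (see the earlier sketch). `smc32()` is again a hypothetical conduit helper:

```c
/* Sketch of how an OS is expected to drive SMCCC_ARCH_WORKAROUND_1. */
#include <stdint.h>

int32_t smc32(uint32_t fid, uint32_t arg);	/* hypothetical conduit */

#define SMCCC_ARCH_FEATURES	0x80000001u
#define SMCCC_ARCH_WORKAROUND_1	0x80008000u
#define SMC_OK			0

static int have_workaround_1;

void probe_arch_workaround_1(void)
{
	have_workaround_1 =
		(smc32(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1) == SMC_OK);
}

void on_context_switch(void)
{
	/*
	 * The firmware applies the mitigation on entry to EL3, so the
	 * call itself is the workaround; no result needs checking.
	 */
	if (have_workaround_1)
		(void)smc32(SMCCC_ARCH_WORKAROUND_1, 0);
}
```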