Rename symbols and files relating to CVE-2017-5715

This patch renames the symbols, macros and files relating to
CVE-2017-5715 to the shorter wa_cve_2017_5715 prefix, making it easier
to introduce new symbols and files for future CVE mitigations under a
consistent naming scheme.
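
The main symbol renames are:

  workaround_bpiall_runtime_exceptions        ->  wa_cve_2017_5715_bpiall_vbar       (sp_min)
  workaround_icache_inv_runtime_exceptions    ->  wa_cve_2017_5715_icache_inv_vbar   (sp_min)
  workaround_bpiall_vbar0_runtime_exceptions  ->  wa_cve_2017_5715_bpiall_vbar       (BL31)
  workaround_bpiall_vbar1_runtime_exceptions  ->  wa_cve_2017_5715_bpiall_ret_vbar   (BL31)
  workaround_mmu_runtime_exceptions           ->  wa_cve_2017_5715_mmu_vbar          (BL31)
  check_workaround_cve_2017_5715              ->  check_wa_cve_2017_5715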

Change-Id: I24c23822862ca73648c772885f1690bed043dbc7
Signed-off-by: Dimitris Papastamos <dimitris.papastamos@arm.com>
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 0e47ddf..a6c0a9a 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -61,8 +61,8 @@
 endif
 
 ifeq (${WORKAROUND_CVE_2017_5715},1)
-BL31_SOURCES		+=	lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S	\
-				lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+BL31_SOURCES		+=	lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S	\
+				lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
 endif
 
 BL31_LINKERFILE		:=	bl31/bl31.ld.S
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
index 193b1d5..6233299 100644
--- a/bl32/sp_min/sp_min.mk
+++ b/bl32/sp_min/sp_min.mk
@@ -29,8 +29,8 @@
 endif
 
 ifeq (${WORKAROUND_CVE_2017_5715},1)
-BL32_SOURCES		+=	bl32/sp_min/workaround_cve_2017_5715_bpiall.S	\
-				bl32/sp_min/workaround_cve_2017_5715_icache_inv.S
+BL32_SOURCES		+=	bl32/sp_min/wa_cve_2017_5715_bpiall.S	\
+				bl32/sp_min/wa_cve_2017_5715_icache_inv.S
 endif
 
 BL32_LINKERFILE	:=	bl32/sp_min/sp_min.ld.S
diff --git a/bl32/sp_min/workaround_cve_2017_5715_bpiall.S b/bl32/sp_min/wa_cve_2017_5715_bpiall.S
similarity index 94%
rename from bl32/sp_min/workaround_cve_2017_5715_bpiall.S
rename to bl32/sp_min/wa_cve_2017_5715_bpiall.S
index 5387cef..385f3d4 100644
--- a/bl32/sp_min/workaround_cve_2017_5715_bpiall.S
+++ b/bl32/sp_min/wa_cve_2017_5715_bpiall.S
@@ -6,9 +6,9 @@
 
 #include <asm_macros.S>
 
-	.globl	workaround_bpiall_runtime_exceptions
+	.globl	wa_cve_2017_5715_bpiall_vbar
 
-vector_base workaround_bpiall_runtime_exceptions
+vector_base wa_cve_2017_5715_bpiall_vbar
 	/* We encode the exception entry in the bottom 3 bits of SP */
 	add	sp, sp, #1	/* Reset: 0b111 */
 	add	sp, sp, #1	/* Undef: 0b110 */
diff --git a/bl32/sp_min/workaround_cve_2017_5715_icache_inv.S b/bl32/sp_min/wa_cve_2017_5715_icache_inv.S
similarity index 94%
rename from bl32/sp_min/workaround_cve_2017_5715_icache_inv.S
rename to bl32/sp_min/wa_cve_2017_5715_icache_inv.S
index 9102b02..d0a4625 100644
--- a/bl32/sp_min/workaround_cve_2017_5715_icache_inv.S
+++ b/bl32/sp_min/wa_cve_2017_5715_icache_inv.S
@@ -6,9 +6,9 @@
 
 #include <asm_macros.S>
 
-	.globl	workaround_icache_inv_runtime_exceptions
+	.globl	wa_cve_2017_5715_icache_inv_vbar
 
-vector_base workaround_icache_inv_runtime_exceptions
+vector_base wa_cve_2017_5715_icache_inv_vbar
 	/* We encode the exception entry in the bottom 3 bits of SP */
 	add	sp, sp, #1	/* Reset: 0b111 */
 	add	sp, sp, #1	/* Undef: 0b110 */
diff --git a/include/lib/cpus/wa_cve_2017_5715.h b/include/lib/cpus/wa_cve_2017_5715.h
new file mode 100644
index 0000000..0a65a56
--- /dev/null
+++ b/include/lib/cpus/wa_cve_2017_5715.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __WA_CVE_2017_5715_H__
+#define __WA_CVE_2017_5715_H__
+
+int check_wa_cve_2017_5715(void);
+
+#endif /* __WA_CVE_2017_5715_H__ */
diff --git a/include/lib/cpus/workaround_cve_2017_5715.h b/include/lib/cpus/workaround_cve_2017_5715.h
deleted file mode 100644
index e837a67..0000000
--- a/include/lib/cpus/workaround_cve_2017_5715.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef __WORKAROUND_CVE_2017_5715_H__
-#define __WORKAROUND_CVE_2017_5715_H__
-
-int check_workaround_cve_2017_5715(void);
-
-#endif /* __WORKAROUND_CVE_2017_5715_H__ */
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index 4d072e1..8470c6c 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -393,7 +393,7 @@
 #endif
 
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
-	adr	x0, workaround_mmu_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_mmu_vbar
 	msr	vbar_el3, x0
 #endif
 
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 29fa77b..b67c987 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -126,7 +126,7 @@
 
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
 	cpu_check_csv2	x0, 1f
-	adr	x0, workaround_mmu_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_mmu_vbar
 	msr	vbar_el3, x0
 1:
 #endif
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index 0a961ea..c66067d 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -38,7 +38,7 @@
 func cortex_a73_reset_func
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
 	cpu_check_csv2	x0, 1f
-	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_bpiall_vbar
 	msr	vbar_el3, x0
 1:
 #endif
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 288f5af..f92e4ed 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -13,7 +13,7 @@
 func cortex_a75_reset_func
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
 	cpu_check_csv2	x0, 1f
-	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_bpiall_vbar
 	msr	vbar_el3, x0
 1:
 #endif
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 9f13ed2..78c66e6 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -285,7 +285,7 @@
 #endif
 
 /*
- * int check_workaround_cve_2017_5715(void);
+ * int check_wa_cve_2017_5715(void);
  *
  * This function returns:
  *  - ERRATA_APPLIES when firmware mitigation is required.
@@ -296,8 +296,8 @@
  * NOTE: Must be called only after cpu_ops have been initialized
  *       in per-CPU data.
  */
-	.globl	check_workaround_cve_2017_5715
-func check_workaround_cve_2017_5715
+	.globl	check_wa_cve_2017_5715
+func check_wa_cve_2017_5715
 	mrs	x0, tpidr_el3
 #if ENABLE_ASSERTIONS
 	cmp	x0, #0
@@ -315,4 +315,4 @@
 1:
 	mov	x0, #ERRATA_NOT_APPLIES
 	ret
-endfunc check_workaround_cve_2017_5715
+endfunc check_wa_cve_2017_5715
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
similarity index 67%
rename from lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
rename to lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
index cd82497..8437155 100644
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
@@ -9,13 +9,13 @@
 #include <asm_macros.S>
 #include <context.h>
 
-	.globl	workaround_bpiall_vbar0_runtime_exceptions
+	.globl	wa_cve_2017_5715_bpiall_vbar
 
 #define EMIT_BPIALL		0xee070fd5
 #define EMIT_SMC		0xe1600070
 #define ESR_EL3_A64_SMC0	0x5e000000
 
-	.macro	enter_workaround _from_vector
+	.macro	apply_cve_2017_5715_wa _from_vector
 	/*
 	 * Save register state to enable a call to AArch32 S-EL1 and return
 	 * Identify the original calling vector in w2 (==_from_vector)
@@ -66,7 +66,7 @@
 	movz	w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)
 
 	/* Switch EL3 exception vectors while the workaround is executing. */
-	adr	x9, workaround_bpiall_vbar1_runtime_exceptions
+	adr	x9, wa_cve_2017_5715_bpiall_ret_vbar
 
 	/* Setup SCTLR_EL1 with MMU off and I$ on */
 	ldr	x10, stub_sel1_sctlr
@@ -93,13 +93,13 @@
 	 * is not enabled, the existing runtime exception vector table is used.
 	 * ---------------------------------------------------------------------
 	 */
-vector_base workaround_bpiall_vbar0_runtime_exceptions
+vector_base wa_cve_2017_5715_bpiall_vbar
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_EL0 : 0x0 - 0x200
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
+vector_entry bpiall_sync_exception_sp_el0
 	b	sync_exception_sp_el0
 	nop	/* to force 8 byte alignment for the following stub */
 
@@ -114,79 +114,79 @@
 	.word	EMIT_BPIALL
 	.word	EMIT_SMC
 
-	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
+	check_vector_size bpiall_sync_exception_sp_el0
 
-vector_entry workaround_bpiall_vbar0_irq_sp_el0
+vector_entry bpiall_irq_sp_el0
 	b	irq_sp_el0
-	check_vector_size workaround_bpiall_vbar0_irq_sp_el0
+	check_vector_size bpiall_irq_sp_el0
 
-vector_entry workaround_bpiall_vbar0_fiq_sp_el0
+vector_entry bpiall_fiq_sp_el0
 	b	fiq_sp_el0
-	check_vector_size workaround_bpiall_vbar0_fiq_sp_el0
+	check_vector_size bpiall_fiq_sp_el0
 
-vector_entry workaround_bpiall_vbar0_serror_sp_el0
+vector_entry bpiall_serror_sp_el0
 	b	serror_sp_el0
-	check_vector_size workaround_bpiall_vbar0_serror_sp_el0
+	check_vector_size bpiall_serror_sp_el0
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_ELx: 0x200 - 0x400
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx
+vector_entry bpiall_sync_exception_sp_elx
 	b	sync_exception_sp_elx
-	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx
+	check_vector_size bpiall_sync_exception_sp_elx
 
-vector_entry workaround_bpiall_vbar0_irq_sp_elx
+vector_entry bpiall_irq_sp_elx
 	b	irq_sp_elx
-	check_vector_size workaround_bpiall_vbar0_irq_sp_elx
+	check_vector_size bpiall_irq_sp_elx
 
-vector_entry workaround_bpiall_vbar0_fiq_sp_elx
+vector_entry bpiall_fiq_sp_elx
 	b	fiq_sp_elx
-	check_vector_size workaround_bpiall_vbar0_fiq_sp_elx
+	check_vector_size bpiall_fiq_sp_elx
 
-vector_entry workaround_bpiall_vbar0_serror_sp_elx
+vector_entry bpiall_serror_sp_elx
 	b	serror_sp_elx
-	check_vector_size workaround_bpiall_vbar0_serror_sp_elx
+	check_vector_size bpiall_serror_sp_elx
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch64 : 0x400 - 0x600
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
-	enter_workaround 1
-	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64
+vector_entry bpiall_sync_exception_aarch64
+	apply_cve_2017_5715_wa 1
+	check_vector_size bpiall_sync_exception_aarch64
 
-vector_entry workaround_bpiall_vbar0_irq_aarch64
-	enter_workaround 2
-	check_vector_size workaround_bpiall_vbar0_irq_aarch64
+vector_entry bpiall_irq_aarch64
+	apply_cve_2017_5715_wa 2
+	check_vector_size bpiall_irq_aarch64
 
-vector_entry workaround_bpiall_vbar0_fiq_aarch64
-	enter_workaround 4
-	check_vector_size workaround_bpiall_vbar0_fiq_aarch64
+vector_entry bpiall_fiq_aarch64
+	apply_cve_2017_5715_wa 4
+	check_vector_size bpiall_fiq_aarch64
 
-vector_entry workaround_bpiall_vbar0_serror_aarch64
-	enter_workaround 8
-	check_vector_size workaround_bpiall_vbar0_serror_aarch64
+vector_entry bpiall_serror_aarch64
+	apply_cve_2017_5715_wa 8
+	check_vector_size bpiall_serror_aarch64
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch32 : 0x600 - 0x800
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
-	enter_workaround 1
-	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32
+vector_entry bpiall_sync_exception_aarch32
+	apply_cve_2017_5715_wa 1
+	check_vector_size bpiall_sync_exception_aarch32
 
-vector_entry workaround_bpiall_vbar0_irq_aarch32
-	enter_workaround 2
-	check_vector_size workaround_bpiall_vbar0_irq_aarch32
+vector_entry bpiall_irq_aarch32
+	apply_cve_2017_5715_wa 2
+	check_vector_size bpiall_irq_aarch32
 
-vector_entry workaround_bpiall_vbar0_fiq_aarch32
-	enter_workaround 4
-	check_vector_size workaround_bpiall_vbar0_fiq_aarch32
+vector_entry bpiall_fiq_aarch32
+	apply_cve_2017_5715_wa 4
+	check_vector_size bpiall_fiq_aarch32
 
-vector_entry workaround_bpiall_vbar0_serror_aarch32
-	enter_workaround 8
-	check_vector_size workaround_bpiall_vbar0_serror_aarch32
+vector_entry bpiall_serror_aarch32
+	apply_cve_2017_5715_wa 8
+	check_vector_size bpiall_serror_aarch32
 
 	/* ---------------------------------------------------------------------
 	 * This vector table is used while the workaround is executing.  It
@@ -195,73 +195,73 @@
 	 * EL3 state before proceeding with the normal runtime exception vector.
 	 * ---------------------------------------------------------------------
 	 */
-vector_base workaround_bpiall_vbar1_runtime_exceptions
+vector_base wa_cve_2017_5715_bpiall_ret_vbar
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0
+vector_entry bpiall_ret_sync_exception_sp_el0
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0
+	check_vector_size bpiall_ret_sync_exception_sp_el0
 
-vector_entry workaround_bpiall_vbar1_irq_sp_el0
+vector_entry bpiall_ret_irq_sp_el0
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_irq_sp_el0
+	check_vector_size bpiall_ret_irq_sp_el0
 
-vector_entry workaround_bpiall_vbar1_fiq_sp_el0
+vector_entry bpiall_ret_fiq_sp_el0
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_fiq_sp_el0
+	check_vector_size bpiall_ret_fiq_sp_el0
 
-vector_entry workaround_bpiall_vbar1_serror_sp_el0
+vector_entry bpiall_ret_serror_sp_el0
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_serror_sp_el0
+	check_vector_size bpiall_ret_serror_sp_el0
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx
+vector_entry bpiall_ret_sync_exception_sp_elx
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx
+	check_vector_size bpiall_ret_sync_exception_sp_elx
 
-vector_entry workaround_bpiall_vbar1_irq_sp_elx
+vector_entry bpiall_ret_irq_sp_elx
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_irq_sp_elx
+	check_vector_size bpiall_ret_irq_sp_elx
 
-vector_entry workaround_bpiall_vbar1_fiq_sp_elx
+vector_entry bpiall_ret_fiq_sp_elx
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_fiq_sp_elx
+	check_vector_size bpiall_ret_fiq_sp_elx
 
-vector_entry workaround_bpiall_vbar1_serror_sp_elx
+vector_entry bpiall_ret_serror_sp_elx
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_serror_sp_elx
+	check_vector_size bpiall_ret_serror_sp_elx
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar1_sync_exception_aarch64
+vector_entry bpiall_ret_sync_exception_aarch64
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64
+	check_vector_size bpiall_ret_sync_exception_aarch64
 
-vector_entry workaround_bpiall_vbar1_irq_aarch64
+vector_entry bpiall_ret_irq_aarch64
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_irq_aarch64
+	check_vector_size bpiall_ret_irq_aarch64
 
-vector_entry workaround_bpiall_vbar1_fiq_aarch64
+vector_entry bpiall_ret_fiq_aarch64
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_fiq_aarch64
+	check_vector_size bpiall_ret_fiq_aarch64
 
-vector_entry workaround_bpiall_vbar1_serror_aarch64
+vector_entry bpiall_ret_serror_aarch64
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_serror_aarch64
+	check_vector_size bpiall_ret_serror_aarch64
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch32 : 0x600 - 0x800
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
+vector_entry bpiall_ret_sync_exception_aarch32
 	/*
 	 * w2 indicates which SEL1 stub was run and thus which original vector was used
 	 * w3-w6 contain saved system register state (esr_el3 in w3)
@@ -281,7 +281,7 @@
 	 * to workaround entry table in preparation for subsequent
 	 * Sync/IRQ/FIQ/SError exceptions.
 	 */
-	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_bpiall_vbar
 	msr	vbar_el3, x0
 
 	/*
@@ -324,34 +324,34 @@
 1:
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	sync_exception_aarch64
-	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
+	check_vector_size bpiall_ret_sync_exception_aarch32
 
-vector_entry workaround_bpiall_vbar1_irq_aarch32
+vector_entry bpiall_ret_irq_aarch32
 	b	report_unhandled_interrupt
 
 	/*
 	 * Post-workaround fan-out for non-sync exceptions
 	 */
 workaround_not_sync:
-	tbnz	w2, #3, workaround_bpiall_vbar1_serror
-	tbnz	w2, #2, workaround_bpiall_vbar1_fiq
+	tbnz	w2, #3, bpiall_ret_serror
+	tbnz	w2, #2, bpiall_ret_fiq
 	/* IRQ */
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	irq_aarch64
 
-workaround_bpiall_vbar1_fiq:
+bpiall_ret_fiq:
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	fiq_aarch64
 
-workaround_bpiall_vbar1_serror:
+bpiall_ret_serror:
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	serror_aarch64
-	check_vector_size workaround_bpiall_vbar1_irq_aarch32
+	check_vector_size bpiall_ret_irq_aarch32
 
-vector_entry workaround_bpiall_vbar1_fiq_aarch32
+vector_entry bpiall_ret_fiq_aarch32
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_fiq_aarch32
+	check_vector_size bpiall_ret_fiq_aarch32
 
-vector_entry workaround_bpiall_vbar1_serror_aarch32
+vector_entry bpiall_ret_serror_aarch32
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_serror_aarch32
+	check_vector_size bpiall_ret_serror_aarch32
diff --git a/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
new file mode 100644
index 0000000..039e373
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arm_arch_svc.h>
+#include <asm_macros.S>
+#include <context.h>
+
+	.globl	wa_cve_2017_5715_mmu_vbar
+
+#define ESR_EL3_A64_SMC0	0x5e000000
+
+vector_base wa_cve_2017_5715_mmu_vbar
+
+	.macro	apply_cve_2017_5715_wa _is_sync_exception
+	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	mrs	x1, sctlr_el3
+	/* Disable MMU */
+	bic	x1, x1, #SCTLR_M_BIT
+	msr	sctlr_el3, x1
+	isb
+	/* Enable MMU */
+	orr	x1, x1, #SCTLR_M_BIT
+	msr	sctlr_el3, x1
+	/*
+	 * Defer ISB to avoid synchronizing twice in case we hit
+	 * the workaround SMC call which will implicitly synchronize
+	 * because of the ERET instruction.
+	 */
+
+	/*
+	 * Ensure SMC is coming from A64 state on #0
+	 * with W0 = SMCCC_ARCH_WORKAROUND_1
+	 *
+	 * This sequence evaluates as:
+	 *    (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing use of a single branch operation
+	 */
+	.if \_is_sync_exception
+		orr	w1, wzr, #SMCCC_ARCH_WORKAROUND_1
+		cmp	w0, w1
+		mrs	x0, esr_el3
+		mov_imm	w1, ESR_EL3_A64_SMC0
+		ccmp	w0, w1, #0, eq
+		/* Static predictor will predict a fall through */
+		bne	1f
+		eret
+1:
+	.endif
+
+	/*
+	 * Synchronize now to enable the MMU.  This is required
+	 * to ensure the load pair below reads the data stored earlier.
+	 */
+	isb
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	.endm
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry mmu_sync_exception_sp_el0
+	b	sync_exception_sp_el0
+	check_vector_size mmu_sync_exception_sp_el0
+
+vector_entry mmu_irq_sp_el0
+	b	irq_sp_el0
+	check_vector_size mmu_irq_sp_el0
+
+vector_entry mmu_fiq_sp_el0
+	b	fiq_sp_el0
+	check_vector_size mmu_fiq_sp_el0
+
+vector_entry mmu_serror_sp_el0
+	b	serror_sp_el0
+	check_vector_size mmu_serror_sp_el0
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry mmu_sync_exception_sp_elx
+	b	sync_exception_sp_elx
+	check_vector_size mmu_sync_exception_sp_elx
+
+vector_entry mmu_irq_sp_elx
+	b	irq_sp_elx
+	check_vector_size mmu_irq_sp_elx
+
+vector_entry mmu_fiq_sp_elx
+	b	fiq_sp_elx
+	check_vector_size mmu_fiq_sp_elx
+
+vector_entry mmu_serror_sp_elx
+	b	serror_sp_elx
+	check_vector_size mmu_serror_sp_elx
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry mmu_sync_exception_aarch64
+	apply_cve_2017_5715_wa _is_sync_exception=1
+	b	sync_exception_aarch64
+	check_vector_size mmu_sync_exception_aarch64
+
+vector_entry mmu_irq_aarch64
+	apply_cve_2017_5715_wa _is_sync_exception=0
+	b	irq_aarch64
+	check_vector_size mmu_irq_aarch64
+
+vector_entry mmu_fiq_aarch64
+	apply_cve_2017_5715_wa _is_sync_exception=0
+	b	fiq_aarch64
+	check_vector_size mmu_fiq_aarch64
+
+vector_entry mmu_serror_aarch64
+	apply_cve_2017_5715_wa _is_sync_exception=0
+	b	serror_aarch64
+	check_vector_size mmu_serror_aarch64
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x800
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry mmu_sync_exception_aarch32
+	apply_cve_2017_5715_wa _is_sync_exception=1
+	b	sync_exception_aarch32
+	check_vector_size mmu_sync_exception_aarch32
+
+vector_entry mmu_irq_aarch32
+	apply_cve_2017_5715_wa _is_sync_exception=0
+	b	irq_aarch32
+	check_vector_size mmu_irq_aarch32
+
+vector_entry mmu_fiq_aarch32
+	apply_cve_2017_5715_wa _is_sync_exception=0
+	b	fiq_aarch32
+	check_vector_size mmu_fiq_aarch32
+
+vector_entry mmu_serror_aarch32
+	apply_cve_2017_5715_wa _is_sync_exception=0
+	b	serror_aarch32
+	check_vector_size mmu_serror_aarch32
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
deleted file mode 100644
index b24b620..0000000
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <arm_arch_svc.h>
-#include <asm_macros.S>
-#include <context.h>
-
-	.globl	workaround_mmu_runtime_exceptions
-
-#define ESR_EL3_A64_SMC0	0x5e000000
-
-vector_base workaround_mmu_runtime_exceptions
-
-	.macro	apply_workaround _is_sync_exception
-	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	mrs	x1, sctlr_el3
-	/* Disable MMU */
-	bic	x1, x1, #SCTLR_M_BIT
-	msr	sctlr_el3, x1
-	isb
-	/* Enable MMU */
-	orr	x1, x1, #SCTLR_M_BIT
-	msr	sctlr_el3, x1
-	/*
-	 * Defer ISB to avoid synchronizing twice in case we hit
-	 * the workaround SMC call which will implicitly synchronize
-	 * because of the ERET instruction.
-	 */
-
-	/*
-	 * Ensure SMC is coming from A64 state on #0
-	 * with W0 = SMCCC_ARCH_WORKAROUND_1
-	 *
-	 * This sequence evaluates as:
-	 *    (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
-	 * allowing use of a single branch operation
-	 */
-	.if \_is_sync_exception
-		orr	w1, wzr, #SMCCC_ARCH_WORKAROUND_1
-		cmp	w0, w1
-		mrs	x0, esr_el3
-		mov_imm	w1, ESR_EL3_A64_SMC0
-		ccmp	w0, w1, #0, eq
-		/* Static predictor will predict a fall through */
-		bne	1f
-		eret
-1:
-	.endif
-
-	/*
-	 * Synchronize now to enable the MMU.  This is required
-	 * to ensure the load pair below reads the data stored earlier.
-	 */
-	isb
-	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	.endm
-
-	/* ---------------------------------------------------------------------
-	 * Current EL with SP_EL0 : 0x0 - 0x200
-	 * ---------------------------------------------------------------------
-	 */
-vector_entry workaround_mmu_sync_exception_sp_el0
-	b	sync_exception_sp_el0
-	check_vector_size workaround_mmu_sync_exception_sp_el0
-
-vector_entry workaround_mmu_irq_sp_el0
-	b	irq_sp_el0
-	check_vector_size workaround_mmu_irq_sp_el0
-
-vector_entry workaround_mmu_fiq_sp_el0
-	b	fiq_sp_el0
-	check_vector_size workaround_mmu_fiq_sp_el0
-
-vector_entry workaround_mmu_serror_sp_el0
-	b	serror_sp_el0
-	check_vector_size workaround_mmu_serror_sp_el0
-
-	/* ---------------------------------------------------------------------
-	 * Current EL with SP_ELx: 0x200 - 0x400
-	 * ---------------------------------------------------------------------
-	 */
-vector_entry workaround_mmu_sync_exception_sp_elx
-	b	sync_exception_sp_elx
-	check_vector_size workaround_mmu_sync_exception_sp_elx
-
-vector_entry workaround_mmu_irq_sp_elx
-	b	irq_sp_elx
-	check_vector_size workaround_mmu_irq_sp_elx
-
-vector_entry workaround_mmu_fiq_sp_elx
-	b	fiq_sp_elx
-	check_vector_size workaround_mmu_fiq_sp_elx
-
-vector_entry workaround_mmu_serror_sp_elx
-	b	serror_sp_elx
-	check_vector_size workaround_mmu_serror_sp_elx
-
-	/* ---------------------------------------------------------------------
-	 * Lower EL using AArch64 : 0x400 - 0x600
-	 * ---------------------------------------------------------------------
-	 */
-vector_entry workaround_mmu_sync_exception_aarch64
-	apply_workaround _is_sync_exception=1
-	b	sync_exception_aarch64
-	check_vector_size workaround_mmu_sync_exception_aarch64
-
-vector_entry workaround_mmu_irq_aarch64
-	apply_workaround _is_sync_exception=0
-	b	irq_aarch64
-	check_vector_size workaround_mmu_irq_aarch64
-
-vector_entry workaround_mmu_fiq_aarch64
-	apply_workaround _is_sync_exception=0
-	b	fiq_aarch64
-	check_vector_size workaround_mmu_fiq_aarch64
-
-vector_entry workaround_mmu_serror_aarch64
-	apply_workaround _is_sync_exception=0
-	b	serror_aarch64
-	check_vector_size workaround_mmu_serror_aarch64
-
-	/* ---------------------------------------------------------------------
-	 * Lower EL using AArch32 : 0x600 - 0x800
-	 * ---------------------------------------------------------------------
-	 */
-vector_entry workaround_mmu_sync_exception_aarch32
-	apply_workaround _is_sync_exception=1
-	b	sync_exception_aarch32
-	check_vector_size workaround_mmu_sync_exception_aarch32
-
-vector_entry workaround_mmu_irq_aarch32
-	apply_workaround _is_sync_exception=0
-	b	irq_aarch32
-	check_vector_size workaround_mmu_irq_aarch32
-
-vector_entry workaround_mmu_fiq_aarch32
-	apply_workaround _is_sync_exception=0
-	b	fiq_aarch32
-	check_vector_size workaround_mmu_fiq_aarch32
-
-vector_entry workaround_mmu_serror_aarch32
-	apply_workaround _is_sync_exception=0
-	b	serror_aarch32
-	check_vector_size workaround_mmu_serror_aarch32
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
index eb736c0..c357ebd 100644
--- a/services/arm_arch_svc/arm_arch_svc_setup.c
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -10,7 +10,7 @@
 #include <runtime_svc.h>
 #include <smccc.h>
 #include <smccc_helpers.h>
-#include <workaround_cve_2017_5715.h>
+#include <wa_cve_2017_5715.h>
 
 static int32_t smccc_version(void)
 {
@@ -25,7 +25,7 @@
 		return SMC_OK;
 #if WORKAROUND_CVE_2017_5715
 	case SMCCC_ARCH_WORKAROUND_1:
-		if (check_workaround_cve_2017_5715() == ERRATA_NOT_APPLIES)
+		if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)
 			return 1;
 		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
 #endif