Merge pull request #1389 from danielboulby-arm/db/bugfix

Code change to fix small bugs
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 0e47ddf..a6c0a9a 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -61,8 +61,8 @@
 endif
 
 ifeq (${WORKAROUND_CVE_2017_5715},1)
-BL31_SOURCES		+=	lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S	\
-				lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+BL31_SOURCES		+=	lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S	\
+				lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
 endif
 
 BL31_LINKERFILE		:=	bl31/bl31.ld.S
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
index 193b1d5..6233299 100644
--- a/bl32/sp_min/sp_min.mk
+++ b/bl32/sp_min/sp_min.mk
@@ -29,8 +29,8 @@
 endif
 
 ifeq (${WORKAROUND_CVE_2017_5715},1)
-BL32_SOURCES		+=	bl32/sp_min/workaround_cve_2017_5715_bpiall.S	\
-				bl32/sp_min/workaround_cve_2017_5715_icache_inv.S
+BL32_SOURCES		+=	bl32/sp_min/wa_cve_2017_5715_bpiall.S	\
+				bl32/sp_min/wa_cve_2017_5715_icache_inv.S
 endif
 
 BL32_LINKERFILE	:=	bl32/sp_min/sp_min.ld.S
diff --git a/bl32/sp_min/workaround_cve_2017_5715_bpiall.S b/bl32/sp_min/wa_cve_2017_5715_bpiall.S
similarity index 94%
rename from bl32/sp_min/workaround_cve_2017_5715_bpiall.S
rename to bl32/sp_min/wa_cve_2017_5715_bpiall.S
index 5387cef..385f3d4 100644
--- a/bl32/sp_min/workaround_cve_2017_5715_bpiall.S
+++ b/bl32/sp_min/wa_cve_2017_5715_bpiall.S
@@ -6,9 +6,9 @@
 
 #include <asm_macros.S>
 
-	.globl	workaround_bpiall_runtime_exceptions
+	.globl	wa_cve_2017_5715_bpiall_vbar
 
-vector_base workaround_bpiall_runtime_exceptions
+vector_base wa_cve_2017_5715_bpiall_vbar
 	/* We encode the exception entry in the bottom 3 bits of SP */
 	add	sp, sp, #1	/* Reset: 0b111 */
 	add	sp, sp, #1	/* Undef: 0b110 */
diff --git a/bl32/sp_min/workaround_cve_2017_5715_icache_inv.S b/bl32/sp_min/wa_cve_2017_5715_icache_inv.S
similarity index 94%
rename from bl32/sp_min/workaround_cve_2017_5715_icache_inv.S
rename to bl32/sp_min/wa_cve_2017_5715_icache_inv.S
index 9102b02..d0a4625 100644
--- a/bl32/sp_min/workaround_cve_2017_5715_icache_inv.S
+++ b/bl32/sp_min/wa_cve_2017_5715_icache_inv.S
@@ -6,9 +6,9 @@
 
 #include <asm_macros.S>
 
-	.globl	workaround_icache_inv_runtime_exceptions
+	.globl	wa_cve_2017_5715_icache_inv_vbar
 
-vector_base workaround_icache_inv_runtime_exceptions
+vector_base wa_cve_2017_5715_icache_inv_vbar
 	/* We encode the exception entry in the bottom 3 bits of SP */
 	add	sp, sp, #1	/* Reset: 0b111 */
 	add	sp, sp, #1	/* Undef: 0b110 */
diff --git a/docs/cpu-specific-build-macros.rst b/docs/cpu-specific-build-macros.rst
index 65f6adb..c11f640 100644
--- a/docs/cpu-specific-build-macros.rst
+++ b/docs/cpu-specific-build-macros.rst
@@ -24,6 +24,17 @@
    with the recommendation in the spec regarding workaround discovery.
    Defaults to 1.
 
+-  ``WORKAROUND_CVE_2018_3639``: Enables the security workaround for
+   `CVE-2018-3639`_. Defaults to 1. The TF-A project recommends keeping
+   the default value of 1, even on platforms unaffected by CVE-2018-3639,
+   in order to comply with the recommendation in the spec regarding
+   workaround discovery.
+
+-  ``DYNAMIC_WORKAROUND_CVE_2018_3639``: Enables dynamic mitigation for
+   `CVE-2018-3639`_. This build option should be set to 1 if the target
+   platform contains at least one CPU that requires dynamic mitigation.
+   Defaults to 0.
+
 CPU Errata Workarounds
 ----------------------
 
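The "workaround discovery" mentioned above is the SMCCC probing mechanism by
which software at lower exception levels asks firmware whether a mitigation is
implemented and whether it is needed at all. A minimal caller-side sketch in
C, illustrative only -- the ``smc1()`` helper is a placeholder for code that
issues an SMC with one argument and returns the value left in X0, and the
exact return-value conventions are defined by the SMCCC specification::

    #include <stdint.h>

    #define SMCCC_ARCH_FEATURES      0x80000001U
    #define SMCCC_ARCH_WORKAROUND_2  0x80007FFFU
    #define SMCCC_ARCH_NOT_REQUIRED  (-2)

    /* Placeholder only: a real helper would execute the SMC
     * instruction and return the value left in register X0. */
    static int32_t smc1(uint32_t fid, uint32_t arg0)
    {
        (void)fid;
        (void)arg0;
        return SMCCC_ARCH_NOT_REQUIRED;
    }

    static int ssbd_mitigation_state(void)
    {
        int32_t ret = smc1(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_2);

        if (ret == SMCCC_ARCH_NOT_REQUIRED)
            return 0;   /* this PE does not need the mitigation */
        if (ret < 0)
            return -1;  /* firmware offers no mitigation */
        return 1;       /* mitigation present; toggled by calling
                           SMCCC_ARCH_WORKAROUND_2 itself */
    }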
diff --git a/docs/firmware-design.rst b/docs/firmware-design.rst
index 477eeaa..51f5b42 100644
--- a/docs/firmware-design.rst
+++ b/docs/firmware-design.rst
@@ -516,8 +516,8 @@
 in memory and changing the address where the system jumps at reset.
 For example:
 
-	-C cluster0.cpu0.RVBAR=0x4014000
-	--data cluster0.cpu0=bl2.bin@0x4014000
+	-C cluster0.cpu0.RVBAR=0x4020000
+	--data cluster0.cpu0=bl2.bin@0x4020000
 
 With this configuration, FVP is like a platform of the first case,
 where the Boot ROM always jumps to the same address. For simplification,
@@ -1743,17 +1743,20 @@
    this is also used for the MHU payload when passing messages to and from the
    SCP.
 
+-  Another 4 KB page is reserved for passing the memory layout between BL1
+   and BL2, and for the dynamic firmware configurations.
+
 -  On FVP, BL1 is originally sitting in the Trusted ROM at address ``0x0``. On
    Juno, BL1 resides in flash memory at address ``0x0BEC0000``. BL1 read-write
    data are relocated to the top of Trusted SRAM at runtime.
 
+-  BL2 is loaded below BL1 R/W data.
+
 -  EL3 Runtime Software, BL31 for AArch64 and BL32 for AArch32 (e.g. SP\_MIN),
    is loaded at the top of the Trusted SRAM, such that its NOBITS sections will
-   overwrite BL1 R/W data. This implies that BL1 global variables remain valid
-   only until execution reaches the EL3 Runtime Software entry point during a
-   cold boot.
-
--  BL2 is loaded below EL3 Runtime Software.
+   overwrite BL1 R/W data and BL2. This implies that BL1 global variables
+   remain valid only until execution reaches the EL3 Runtime Software entry
+   point during a cold boot.
 
 -  On Juno, SCP\_BL2 is loaded temporarily into the EL3 Runtime Software memory
    region and transferred to the SCP before being overwritten by EL3 Runtime
@@ -1766,9 +1769,8 @@
    -  Secure region of DRAM (top 16MB of DRAM configured by the TrustZone
       controller)
 
-   When BL32 (for AArch64) is loaded into Trusted SRAM, its NOBITS sections
-   are allowed to overlay BL2. This memory layout is designed to give the
-   BL32 image as much memory as possible when it is loaded into Trusted SRAM.
+   When BL32 (for AArch64) is loaded into Trusted SRAM, it is placed
+   directly below BL31.
 
 When LOAD\_IMAGE\_V2 is disabled the memory regions for the overlap detection
 mechanism at boot time are defined as follows (shown per API):
@@ -1814,21 +1816,32 @@
 Note: Loading the BL32 image in TZC secured DRAM doesn't change the memory
 layout of the other images in Trusted SRAM.
 
-**FVP with TSP in Trusted SRAM (default option):**
+**FVP with TSP in Trusted SRAM with firmware configs:**
 (These diagrams only cover the AArch64 case)
 
 ::
 
+                   DRAM
+    0xffffffff +----------+
+               :          :
+               |----------|
+               |HW_CONFIG |
+    0x83000000 |----------|  (non-secure)
+               |          |
+    0x80000000 +----------+
+
                Trusted SRAM
-    0x04040000 +----------+  loaded by BL2  ------------------
-               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+    0x04040000 +----------+  loaded by BL2  +----------------+
+               | BL1 (rw) |  <<<<<<<<<<<<<  |                |
+               |----------|  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+               |   BL2    |  <<<<<<<<<<<<<  |                |
                |----------|  <<<<<<<<<<<<<  |----------------|
                |          |  <<<<<<<<<<<<<  | BL31 PROGBITS  |
-               |----------|                 ------------------
-               |   BL2    |  <<<<<<<<<<<<<  |  BL32 NOBITS   |
-               |----------|  <<<<<<<<<<<<<  |----------------|
-               |          |  <<<<<<<<<<<<<  | BL32 PROGBITS  |
-    0x04001000 +----------+                 ------------------
+               |          |  <<<<<<<<<<<<<  |----------------|
+               |          |  <<<<<<<<<<<<<  |     BL32       |
+    0x04002000 +----------+                 +----------------+
+               |fw_configs|
+    0x04001000 +----------+
                |  Shared  |
     0x04000000 +----------+
 
@@ -1837,7 +1850,7 @@
                | BL1 (ro) |
     0x00000000 +----------+
 
-**FVP with TSP in Trusted DRAM with TB_FW_CONFIG and HW_CONFIG :**
+**FVP with TSP in Trusted DRAM with firmware configs (default option):**
 
 ::
 
@@ -1856,17 +1869,15 @@
     0x06000000 +--------------+
 
                  Trusted SRAM
-    0x04040000 +--------------+  loaded by BL2  ------------------
-               |   BL1 (rw)   |  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+    0x04040000 +--------------+  loaded by BL2  +----------------+
+               |   BL1 (rw)   |  <<<<<<<<<<<<<  |                |
+               |--------------|  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+               |     BL2      |  <<<<<<<<<<<<<  |                |
                |--------------|  <<<<<<<<<<<<<  |----------------|
                |              |  <<<<<<<<<<<<<  | BL31 PROGBITS  |
-               |--------------|                 ------------------
-               |     BL2      |
-               |--------------|
-               |              |
-               |--------------|
-               | TB_FW_CONFIG |
-               |--------------|
+               |              |                 +----------------+
+               +--------------+
+               |  fw_configs  |
     0x04001000 +--------------+
                |    Shared    |
     0x04000000 +--------------+
@@ -1876,7 +1887,7 @@
                |   BL1 (ro)   |
     0x00000000 +--------------+
 
-**FVP with TSP in TZC-Secured DRAM:**
+**FVP with TSP in TZC-Secured DRAM with firmware configs:**
 
 ::
 
@@ -1885,19 +1896,22 @@
                |  BL32    |  (secure)
     0xff000000 +----------+
                |          |
-               :          :  (non-secure)
+               |----------|
+               |HW_CONFIG |
+    0x83000000 |----------|  (non-secure)
                |          |
     0x80000000 +----------+
 
                Trusted SRAM
-    0x04040000 +----------+  loaded by BL2  ------------------
-               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+    0x04040000 +----------+  loaded by BL2  +----------------+
+               | BL1 (rw) |  <<<<<<<<<<<<<  |                |
+               |----------|  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+               |   BL2    |  <<<<<<<<<<<<<  |                |
                |----------|  <<<<<<<<<<<<<  |----------------|
                |          |  <<<<<<<<<<<<<  | BL31 PROGBITS  |
-               |----------|                 ------------------
-               |   BL2    |
-               |----------|
-               |          |
+               |          |                 +----------------+
+    0x04002000 +----------+
+               |fw_configs|
     0x04001000 +----------+
                |  Shared  |
     0x04000000 +----------+
@@ -1907,7 +1921,7 @@
                | BL1 (ro) |
     0x00000000 +----------+
 
-**Juno with BL32 in Trusted SRAM (default option):**
+**Juno with BL32 in Trusted SRAM:**
 
 ::
 
@@ -1921,19 +1935,21 @@
     0x08000000 +----------+                  BL31 is loaded
                                              after SCP_BL2 has
                Trusted SRAM                  been sent to SCP
-    0x04040000 +----------+  loaded by BL2  ------------------
-               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+    0x04040000 +----------+  loaded by BL2  +----------------+
+               | BL1 (rw) |  <<<<<<<<<<<<<  |                |
+               |----------|  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+               |   BL2    |  <<<<<<<<<<<<<  |                |
                |----------|  <<<<<<<<<<<<<  |----------------|
                | SCP_BL2  |  <<<<<<<<<<<<<  | BL31 PROGBITS  |
-               |----------|                 ------------------
-               |   BL2    |  <<<<<<<<<<<<<  |  BL32 NOBITS   |
                |----------|  <<<<<<<<<<<<<  |----------------|
-               |          |  <<<<<<<<<<<<<  | BL32 PROGBITS  |
-    0x04001000 +----------+                 ------------------
+               |          |  <<<<<<<<<<<<<  |     BL32       |
+               |          |                 +----------------+
+               |          |
+    0x04001000 +----------+
                |   MHU    |
     0x04000000 +----------+
 
-**Juno with BL32 in TZC-secured DRAM:**
+**Juno with BL32 in TZC-secured DRAM:**
 
 ::
 
@@ -1956,14 +1972,13 @@
     0x08000000 +----------+                  BL31 is loaded
                                              after SCP_BL2 has
                Trusted SRAM                  been sent to SCP
-    0x04040000 +----------+  loaded by BL2  ------------------
-               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+    0x04040000 +----------+  loaded by BL2  +----------------+
+               | BL1 (rw) |  <<<<<<<<<<<<<  |                |
+               |----------|  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+               |   BL2    |  <<<<<<<<<<<<<  |                |
                |----------|  <<<<<<<<<<<<<  |----------------|
                | SCP_BL2  |  <<<<<<<<<<<<<  | BL31 PROGBITS  |
-               |----------|                 ------------------
-               |   BL2    |
-               |----------|
-               |          |
+               |----------|                 +----------------+
     0x04001000 +----------+
                |   MHU    |
     0x04000000 +----------+
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index 5f3823d..f6d0c76 100644
--- a/docs/user-guide.rst
+++ b/docs/user-guide.rst
@@ -1928,7 +1928,7 @@
     -C cluster1.cpu1.RVBAR=0x04001000                            \
     -C cluster1.cpu2.RVBAR=0x04001000                            \
     -C cluster1.cpu3.RVBAR=0x04001000                            \
-    --data cluster0.cpu0="<path-to>/<bl32-binary>"@0x04001000    \
+    --data cluster0.cpu0="<path-to>/<bl32-binary>"@0x04002000    \
     --data cluster0.cpu0="<path-to>/<bl33-binary>"@0x88000000    \
     --data cluster0.cpu0="<path-to>/<fdt>"@0x82000000            \
     --data cluster0.cpu0="<path-to>/<kernel-binary>"@0x80080000  \
@@ -1959,7 +1959,7 @@
     -C cluster1.cpu2.RVBARADDR=0x04020000                        \
     -C cluster1.cpu3.RVBARADDR=0x04020000                        \
     --data cluster0.cpu0="<path-to>/<bl31-binary>"@0x04020000    \
-    --data cluster0.cpu0="<path-to>/<bl32-binary>"@0x04001000    \
+    --data cluster0.cpu0="<path-to>/<bl32-binary>"@0x04002000    \
     --data cluster0.cpu0="<path-to>/<bl33-binary>"@0x88000000    \
     --data cluster0.cpu0="<path-to>/<fdt>"@0x82000000            \
     --data cluster0.cpu0="<path-to>/<kernel-binary>"@0x80080000  \
@@ -1982,7 +1982,7 @@
     -C cluster0.cpu1.RVBARADDR=0x04001000                       \
     -C cluster0.cpu2.RVBARADDR=0x04001000                       \
     -C cluster0.cpu3.RVBARADDR=0x04001000                       \
-    --data cluster0.cpu0="<path-to>/<bl32-binary>"@0x04001000   \
+    --data cluster0.cpu0="<path-to>/<bl32-binary>"@0x04002000   \
     --data cluster0.cpu0="<path-to>/<bl33-binary>"@0x88000000   \
     --data cluster0.cpu0="<path-to>/<fdt>"@0x82000000           \
     --data cluster0.cpu0="<path-to>/<kernel-binary>"@0x80080000 \
diff --git a/include/lib/cpus/aarch32/cortex_a57.h b/include/lib/cpus/aarch32/cortex_a57.h
index 3fac9c7..18cabe1 100644
--- a/include/lib/cpus/aarch32/cortex_a57.h
+++ b/include/lib/cpus/aarch32/cortex_a57.h
@@ -44,6 +44,7 @@
 #define CORTEX_A57_CPUACTLR				p15, 0, c15
 
 #define CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_DMB		(ULL(1) << 59)
+#define CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_STORE		(ULL(1) << 55)
 #define CORTEX_A57_CPUACTLR_GRE_NGRE_AS_NGNRE		(ULL(1) << 54)
 #define CORTEX_A57_CPUACTLR_DIS_OVERREAD		(ULL(1) << 52)
 #define CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA		(ULL(1) << 49)
diff --git a/include/lib/cpus/aarch32/cortex_a72.h b/include/lib/cpus/aarch32/cortex_a72.h
index f7da1f0..0331ace 100644
--- a/include/lib/cpus/aarch32/cortex_a72.h
+++ b/include/lib/cpus/aarch32/cortex_a72.h
@@ -32,6 +32,7 @@
 #define CORTEX_A72_CPUACTLR				p15, 0, c15
 
 #define CORTEX_A72_CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH	(ULL(1) << 56)
+#define CORTEX_A72_CPUACTLR_DIS_LOAD_PASS_STORE		(ULL(1) << 55)
 #define CORTEX_A72_CPUACTLR_NO_ALLOC_WBWA		(ULL(1) << 49)
 #define CORTEX_A72_CPUACTLR_DCC_AS_DCCI			(ULL(1) << 44)
 #define CORTEX_A72_CPUACTLR_DIS_INSTR_PREFETCH		(ULL(1) << 32)
diff --git a/include/lib/cpus/aarch64/cortex_a57.h b/include/lib/cpus/aarch64/cortex_a57.h
index 6c45c06..83ec934 100644
--- a/include/lib/cpus/aarch64/cortex_a57.h
+++ b/include/lib/cpus/aarch64/cortex_a57.h
@@ -44,6 +44,7 @@
 #define CORTEX_A57_CPUACTLR_EL1				S3_1_C15_C2_0
 
 #define CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_DMB	(ULL(1) << 59)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_STORE	(ULL(1) << 55)
 #define CORTEX_A57_CPUACTLR_EL1_GRE_NGRE_AS_NGNRE	(ULL(1) << 54)
 #define CORTEX_A57_CPUACTLR_EL1_DIS_OVERREAD		(ULL(1) << 52)
 #define CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA		(ULL(1) << 49)
diff --git a/include/lib/cpus/aarch64/cortex_a72.h b/include/lib/cpus/aarch64/cortex_a72.h
index 6fbb707..9f18470 100644
--- a/include/lib/cpus/aarch64/cortex_a72.h
+++ b/include/lib/cpus/aarch64/cortex_a72.h
@@ -32,6 +32,7 @@
 #define CORTEX_A72_CPUACTLR_EL1					S3_1_C15_C2_0
 
 #define CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH	(ULL(1) << 56)
+#define CORTEX_A72_CPUACTLR_EL1_DIS_LOAD_PASS_STORE		(ULL(1) << 55)
 #define CORTEX_A72_CPUACTLR_EL1_NO_ALLOC_WBWA			(ULL(1) << 49)
 #define CORTEX_A72_CPUACTLR_EL1_DCC_AS_DCCI			(ULL(1) << 44)
 #define CORTEX_A72_CPUACTLR_EL1_DIS_INSTR_PREFETCH		(ULL(1) << 32)
diff --git a/include/lib/cpus/aarch64/cortex_a73.h b/include/lib/cpus/aarch64/cortex_a73.h
index faff5fe..4db0cae 100644
--- a/include/lib/cpus/aarch64/cortex_a73.h
+++ b/include/lib/cpus/aarch64/cortex_a73.h
@@ -22,4 +22,11 @@
  ******************************************************************************/
 #define CORTEX_A73_L2MERRSR_EL1		S3_1_C15_C2_3   /* Instruction def. */
 
+/*******************************************************************************
+ * CPU implementation defined register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A73_IMP_DEF_REG1		S3_0_C15_C0_0
+
+#define CORTEX_A73_IMP_DEF_REG1_DISABLE_LOAD_PASS_STORE	(1 << 3)
+
 #endif /* __CORTEX_A73_H__ */
diff --git a/include/lib/cpus/aarch64/cortex_a75.h b/include/lib/cpus/aarch64/cortex_a75.h
index 20f0251..493c7d4 100644
--- a/include/lib/cpus/aarch64/cortex_a75.h
+++ b/include/lib/cpus/aarch64/cortex_a75.h
@@ -16,6 +16,13 @@
 #define CORTEX_A75_CPUPWRCTLR_EL1	S3_0_C15_C2_7
 #define CORTEX_A75_CPUECTLR_EL1		S3_0_C15_C1_4
 
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A75_CPUACTLR_EL1		S3_0_C15_C1_0
+
+#define CORTEX_A75_CPUACTLR_EL1_DISABLE_LOAD_PASS_STORE	(ULL(1) << 35)
+
 /* Definitions of register field mask in CORTEX_A75_CPUPWRCTLR_EL1 */
 #define CORTEX_A75_CORE_PWRDN_EN_MASK	0x1
 
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index bfe2449..cd8f3e8 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -18,6 +18,9 @@
 /* Special constant to specify that CPU has no reset function */
 #define CPU_NO_RESET_FUNC		0
 
+#define CPU_NO_EXTRA1_FUNC		0
+#define CPU_NO_EXTRA2_FUNC		0
+
 /* Word size for 64-bit CPUs */
 #define CPU_WORD_SIZE			8
 
@@ -48,6 +51,8 @@
 #endif
 CPU_EXTRA1_FUNC:
 	.space	8
+CPU_EXTRA2_FUNC:
+	.space	8
 #ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
 CPU_PWR_DWN_OPS: /* cpu_ops power down functions */
 	.space  (8 * CPU_MAX_PWR_DWN_OPS)
@@ -119,6 +124,10 @@
 	 *	This is a placeholder for future per CPU operations.  Currently,
 	 *	some CPUs use this entry to set a test function to determine if
 	 *	the workaround for CVE-2017-5715 needs to be applied or not.
+	 * _extra2:
+	 *	This is a placeholder for future per CPU operations.  Currently,
+	 *	some CPUs use this entry to set a function to disable the
+	 *	workaround for CVE-2018-3639.
 	 * _power_down_ops:
 	 *	Comma-separated list of functions to perform power-down
 	 *	operations on the CPU. At least one, and up to
@@ -129,7 +138,7 @@
 	 *	used to handle power down at subsequent levels
 	 */
 	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
-		_extra1:req, _power_down_ops:vararg
+		_extra1:req, _extra2:req, _power_down_ops:vararg
 	.section cpu_ops, "a"
 	.align 3
 	.type cpu_ops_\_name, %object
@@ -138,6 +147,7 @@
 	.quad \_resetfunc
 #endif
 	.quad \_extra1
+	.quad \_extra2
 #ifdef IMAGE_BL31
 1:
 	/* Insert list of functions */
@@ -196,14 +206,15 @@
 
 	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
 		_power_down_ops:vararg
-		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, \
+		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, \
 			\_power_down_ops
 	.endm
 
-	.macro declare_cpu_ops_workaround_cve_2017_5715 _name:req, _midr:req, \
-		_resetfunc:req, _extra1:req, _power_down_ops:vararg
+	.macro declare_cpu_ops_wa _name:req, _midr:req, \
+		_resetfunc:req, _extra1:req, _extra2:req, \
+		_power_down_ops:vararg
 		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
-			\_extra1, \_power_down_ops
+			\_extra1, \_extra2, \_power_down_ops
 	.endm
 
 #if REPORT_ERRATA
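For readers more familiar with C, the cpu_ops entry that these assembler
macros build can be pictured roughly as the struct below. This is an
illustrative sketch only -- the field order and sizes are really defined by
the assembler offsets above, and no such C struct exists in the sources::

    #define CPU_MAX_PWR_DWN_OPS 2   /* assumed: core and cluster levels */

    /* Hypothetical C view of one BL31 AArch64 cpu_ops entry. */
    struct cpu_ops {
        unsigned long midr;                 /* MIDR value to match      */
        void (*reset_func)(void);           /* CPU_RESET_FUNC           */
        int (*extra1)(void);                /* CVE-2017-5715 check      */
        void (*extra2)(void);               /* CVE-2018-3639 disable fn */
        void (*pwr_dwn_ops[CPU_MAX_PWR_DWN_OPS])(void); /* CPU_PWR_DWN_OPS */
    };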
diff --git a/include/lib/cpus/wa_cve_2017_5715.h b/include/lib/cpus/wa_cve_2017_5715.h
new file mode 100644
index 0000000..0a65a56
--- /dev/null
+++ b/include/lib/cpus/wa_cve_2017_5715.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __WA_CVE_2017_5715_H__
+#define __WA_CVE_2017_5715_H__
+
+int check_wa_cve_2017_5715(void);
+
+#endif /* __WA_CVE_2017_5715_H__ */
diff --git a/include/lib/cpus/wa_cve_2018_3639.h b/include/lib/cpus/wa_cve_2018_3639.h
new file mode 100644
index 0000000..36546f7
--- /dev/null
+++ b/include/lib/cpus/wa_cve_2018_3639.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __WA_CVE_2018_3639_H__
+#define __WA_CVE_2018_3639_H__
+
+void *wa_cve_2018_3639_get_disable_ptr(void);
+
+#endif /* __WA_CVE_2018_3639_H__ */
diff --git a/include/lib/cpus/workaround_cve_2017_5715.h b/include/lib/cpus/workaround_cve_2017_5715.h
deleted file mode 100644
index e837a67..0000000
--- a/include/lib/cpus/workaround_cve_2017_5715.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef __WORKAROUND_CVE_2017_5715_H__
-#define __WORKAROUND_CVE_2017_5715_H__
-
-int check_workaround_cve_2017_5715(void);
-
-#endif /* __WORKAROUND_CVE_2017_5715_H__ */
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index cdd74a3..a4f3ea1 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -128,8 +128,8 @@
  * Constants that allow assembler code to access members of and the 'fp_regs'
  * structure at their correct offsets.
  ******************************************************************************/
-#if CTX_INCLUDE_FPREGS
 #define CTX_FPREGS_OFFSET	(CTX_SYSREGS_OFFSET + CTX_SYSREGS_END)
+#if CTX_INCLUDE_FPREGS
 #define CTX_FP_Q0		U(0x0)
 #define CTX_FP_Q1		U(0x10)
 #define CTX_FP_Q2		U(0x20)
@@ -170,8 +170,14 @@
 #else
 #define CTX_FPREGS_END		U(0x210) /* Align to the next 16 byte boundary */
 #endif
+#else
+#define CTX_FPREGS_END		U(0)
 #endif
 
+#define CTX_CVE_2018_3639_OFFSET	(CTX_FPREGS_OFFSET + CTX_FPREGS_END)
+#define CTX_CVE_2018_3639_DISABLE	U(0)
+#define CTX_CVE_2018_3639_END		U(0x10) /* Align to the next 16 byte boundary */
+
 #ifndef __ASSEMBLY__
 
 #include <cassert.h>
@@ -195,6 +201,7 @@
 #define CTX_FPREG_ALL		(CTX_FPREGS_END >> DWORD_SHIFT)
 #endif
 #define CTX_EL3STATE_ALL	(CTX_EL3STATE_END >> DWORD_SHIFT)
+#define CTX_CVE_2018_3639_ALL	(CTX_CVE_2018_3639_END >> DWORD_SHIFT)
 
 /*
  * AArch64 general purpose register context structure. Usually x0-x18,
@@ -227,6 +234,9 @@
  */
 DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
 
+/* Function pointer used by CVE-2018-3639 dynamic mitigation */
+DEFINE_REG_STRUCT(cve_2018_3639, CTX_CVE_2018_3639_ALL);
+
 /*
  * Macros to access members of any of the above structures using their
  * offsets
@@ -251,6 +261,7 @@
 #if CTX_INCLUDE_FPREGS
 	fp_regs_t fpregs_ctx;
 #endif
+	cve_2018_3639_t cve_2018_3639_ctx;
 } cpu_context_t;
 
 /* Macros to access members of the 'cpu_context_t' structure */
@@ -276,6 +287,8 @@
 #endif
 CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \
 	assert_core_context_el3state_offset_mismatch);
+CASSERT(CTX_CVE_2018_3639_OFFSET == __builtin_offsetof(cpu_context_t, cve_2018_3639_ctx), \
+	assert_core_context_cve_2018_3639_offset_mismatch);
 
 /*
  * Helper macro to set the general purpose registers that correspond to
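The new ``cve_2018_3639_ctx`` region gives every saved CPU context a
16-byte-aligned slot whose first word, ``CTX_CVE_2018_3639_DISABLE``, tells
the world-switch code whether to disable the mitigation when entering that
context. A sketch of how EL3 C code might set it, assuming a hypothetical
``get_cve_2018_3639_ctx()`` accessor in the style of the existing per-region
context macros::

    #include <context.h>
    #include <context_mgmt.h>

    /*
     * Hypothetical: record that the given context wants the
     * CVE-2018-3639 mitigation disabled (1) or left enabled (0).
     * get_cve_2018_3639_ctx() is an assumed accessor macro.
     */
    static void ctx_set_ssbd_disable(cpu_context_t *ctx, uint64_t disable)
    {
        write_ctx_reg(get_cve_2018_3639_ctx(ctx),
                      CTX_CVE_2018_3639_DISABLE, disable);
    }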
diff --git a/include/plat/arm/board/common/board_arm_def.h b/include/plat/arm/board/common/board_arm_def.h
index 21ceae3..2d8e4c1 100644
--- a/include/plat/arm/board/common/board_arm_def.h
+++ b/include/plat/arm/board/common/board_arm_def.h
@@ -93,21 +93,19 @@
 #endif
 
 /*
- * PLAT_ARM_MAX_BL31_SIZE is calculated using the current BL31 debug size plus a
- * little space for growth.
+ * Since BL31 NOBITS overlays BL2 and BL1-RW, PLAT_ARM_MAX_BL31_SIZE is
+ * calculated using the current BL31 PROGBITS debug size plus the sizes of
+ * BL2 and BL1-RW.
  */
-#if ENABLE_SPM
-# define PLAT_ARM_MAX_BL31_SIZE		0x40000
-#else
-# define PLAT_ARM_MAX_BL31_SIZE		0x20000
-#endif
+#define PLAT_ARM_MAX_BL31_SIZE		0x3B000
 
 #ifdef AARCH32
 /*
- * PLAT_ARM_MAX_BL32_SIZE is calculated for SP_MIN as the AArch32 Secure
- * Payload.
+ * Since BL32 NOBITS overlays BL2 and BL1-RW, PLAT_ARM_MAX_BL32_SIZE is
+ * calculated using the current SP_MIN PROGBITS debug size plus the sizes of
+ * BL2 and BL1-RW.
  */
-# define PLAT_ARM_MAX_BL32_SIZE		0x1D000
+# define PLAT_ARM_MAX_BL32_SIZE		0x3B000
 #endif
 
 #endif /* ARM_BOARD_OPTIMISE_MEM */
diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h
index 18390d6..1f62ebe 100644
--- a/include/plat/arm/common/arm_def.h
+++ b/include/plat/arm/common/arm_def.h
@@ -317,7 +317,7 @@
  * and limit. Leave enough space for BL2 meminfo.
  */
 #define ARM_TB_FW_CONFIG_BASE		ARM_BL_RAM_BASE + sizeof(meminfo_t)
-#define ARM_TB_FW_CONFIG_LIMIT		BL2_BASE
+#define ARM_TB_FW_CONFIG_LIMIT		ARM_BL_RAM_BASE + PAGE_SIZE
 
 /*******************************************************************************
  * BL1 specific defines.
@@ -338,32 +338,18 @@
 /*******************************************************************************
  * BL2 specific defines.
  ******************************************************************************/
-#if ARM_BL31_IN_DRAM
-/*
- * For AArch64 BL31 is loaded in the DRAM.
- * Put BL2 just below BL1.
- */
-#define BL2_BASE			(BL1_RW_BASE - PLAT_ARM_MAX_BL2_SIZE)
-#define BL2_LIMIT			BL1_RW_BASE
-
-#elif BL2_AT_EL3
-
-#define BL2_BASE			ARM_BL_RAM_BASE
+#if BL2_AT_EL3
+/* Put BL2 in the middle of the Trusted SRAM */
+#define BL2_BASE			(ARM_TRUSTED_SRAM_BASE + \
+						(PLAT_ARM_TRUSTED_SRAM_SIZE >> 1))
 #define BL2_LIMIT			(ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)
 
-#elif defined(AARCH32) || JUNO_AARCH32_EL3_RUNTIME
-/*
- * Put BL2 just below BL32.
- */
-#define BL2_BASE			(BL32_BASE - PLAT_ARM_MAX_BL2_SIZE)
-#define BL2_LIMIT			BL32_BASE
-
 #else
 /*
- * Put BL2 just below BL31.
+ * Put BL2 just below BL1.
  */
-#define BL2_BASE			(BL31_BASE - PLAT_ARM_MAX_BL2_SIZE)
-#define BL2_LIMIT			BL31_BASE
+#define BL2_BASE			(BL1_RW_BASE - PLAT_ARM_MAX_BL2_SIZE)
+#define BL2_LIMIT			BL1_RW_BASE
 #endif
 
 /*******************************************************************************
@@ -384,13 +370,10 @@
 						(PLAT_ARM_TRUSTED_SRAM_SIZE >> 1))
 #define BL31_LIMIT			(ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)
 #else
-/*
- * Put BL31 at the top of the Trusted SRAM.
- */
-#define BL31_BASE			(ARM_BL_RAM_BASE +		\
-						ARM_BL_RAM_SIZE -	\
-						PLAT_ARM_MAX_BL31_SIZE)
-#define BL31_PROGBITS_LIMIT		BL1_RW_BASE
+/* Put BL31 below BL2 in the Trusted SRAM. */
+#define BL31_BASE			((ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)\
+						- PLAT_ARM_MAX_BL31_SIZE)
+#define BL31_PROGBITS_LIMIT		BL2_BASE
 #define BL31_LIMIT			(ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)
 #endif
 
@@ -399,15 +382,17 @@
  * BL32 specific defines for EL3 runtime in AArch32 mode
  ******************************************************************************/
 # if RESET_TO_SP_MIN && !JUNO_AARCH32_EL3_RUNTIME
-/* SP_MIN is the only BL image in SRAM. Allocate the whole of SRAM to BL32 */
-#  define BL32_BASE			ARM_BL_RAM_BASE
+/*
+ * SP_MIN is the only BL image in SRAM. Allocate the whole of SRAM (excluding
+ * the page reserved for fw_configs) to BL32.
+ */
+#  define BL32_BASE			ARM_TB_FW_CONFIG_LIMIT
 #  define BL32_LIMIT			(ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)
 # else
-/* Put BL32 at the top of the Trusted SRAM.*/
-#  define BL32_BASE			(ARM_BL_RAM_BASE +		\
-						ARM_BL_RAM_SIZE -	\
-						PLAT_ARM_MAX_BL32_SIZE)
-#  define BL32_PROGBITS_LIMIT		BL1_RW_BASE
+/* Put BL32 below BL2 in the Trusted SRAM. */
+#  define BL32_BASE			((ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)\
+						- PLAT_ARM_MAX_BL32_SIZE)
+#  define BL32_PROGBITS_LIMIT		BL2_BASE
 #  define BL32_LIMIT			(ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)
 # endif /* RESET_TO_SP_MIN && !JUNO_AARCH32_EL3_RUNTIME */
 
@@ -438,8 +423,8 @@
 # elif ARM_TSP_RAM_LOCATION_ID == ARM_TRUSTED_SRAM_ID
 #  define TSP_SEC_MEM_BASE		ARM_BL_RAM_BASE
 #  define TSP_SEC_MEM_SIZE		ARM_BL_RAM_SIZE
-#  define TSP_PROGBITS_LIMIT		BL2_BASE
-#  define BL32_BASE			ARM_BL_RAM_BASE
+#  define TSP_PROGBITS_LIMIT		BL31_BASE
+#  define BL32_BASE			ARM_TB_FW_CONFIG_LIMIT
 #  define BL32_LIMIT			BL31_BASE
 # elif ARM_TSP_RAM_LOCATION_ID == ARM_TRUSTED_DRAM_ID
 #  define TSP_SEC_MEM_BASE		PLAT_ARM_TRUSTED_DRAM_BASE
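Plugging in the FVP values visible elsewhere in this patch (the shared page
sits at the bottom of Trusted SRAM, so ``ARM_BL_RAM_BASE`` is 0x04001000,
and a ``PAGE_SIZE`` of 0x1000 is assumed) shows why the user guide now loads
BL32 at 0x04002000::

    /* Worked example; numeric values are taken from the FVP
     * memory-layout diagrams above, PAGE_SIZE of 0x1000 is assumed. */
    #define PAGE_SIZE               0x1000
    #define ARM_BL_RAM_BASE         0x04001000  /* above the shared page */
    #define ARM_TB_FW_CONFIG_LIMIT  (ARM_BL_RAM_BASE + PAGE_SIZE)
    #define BL32_BASE               ARM_TB_FW_CONFIG_LIMIT  /* 0x04002000 */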
diff --git a/include/plat/arm/css/common/css_def.h b/include/plat/arm/css/common/css_def.h
index 6d68b44..725c27c 100644
--- a/include/plat/arm/css/common/css_def.h
+++ b/include/plat/arm/css/common/css_def.h
@@ -158,14 +158,14 @@
 /*
  * Load address of SCP_BL2 in CSS platform ports
  * SCP_BL2 is loaded to the same place as BL31 but it shouldn't overwrite BL1
- * rw data.  Once SCP_BL2 is transferred to the SCP, it is discarded and BL31
- * is loaded over the top.
+ * rw data or BL2.  Once SCP_BL2 is transferred to the SCP, it is discarded and
+ * BL31 is loaded over the top.
  */
-#define SCP_BL2_BASE			(BL1_RW_BASE - PLAT_CSS_MAX_SCP_BL2_SIZE)
-#define SCP_BL2_LIMIT			BL1_RW_BASE
+#define SCP_BL2_BASE			(BL2_BASE - PLAT_CSS_MAX_SCP_BL2_SIZE)
+#define SCP_BL2_LIMIT			BL2_BASE
 
-#define SCP_BL2U_BASE			(BL1_RW_BASE - PLAT_CSS_MAX_SCP_BL2U_SIZE)
-#define SCP_BL2U_LIMIT			BL1_RW_BASE
+#define SCP_BL2U_BASE			(BL2_BASE - PLAT_CSS_MAX_SCP_BL2U_SIZE)
+#define SCP_BL2U_LIMIT			BL2_BASE
 #endif /* CSS_LOAD_SCP_IMAGES */
 
 /* Load address of Non-Secure Image for CSS platform ports */
diff --git a/include/services/arm_arch_svc.h b/include/services/arm_arch_svc.h
index 2961601..0d2f477 100644
--- a/include/services/arm_arch_svc.h
+++ b/include/services/arm_arch_svc.h
@@ -10,5 +10,8 @@
 #define SMCCC_VERSION			U(0x80000000)
 #define SMCCC_ARCH_FEATURES		U(0x80000001)
 #define SMCCC_ARCH_WORKAROUND_1		U(0x80008000)
+#define SMCCC_ARCH_WORKAROUND_2		U(0x80007FFF)
+
+#define SMCCC_ARCH_NOT_REQUIRED		-2
 
 #endif /* __ARM_ARCH_SVC_H__ */
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
index f446bff..dff86be 100644
--- a/lib/cpus/aarch32/cortex_a57.S
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -337,6 +337,15 @@
 	bx	lr
 endfunc check_errata_cve_2017_5715
 
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	r0, #ERRATA_APPLIES
+#else
+	mov	r0, #ERRATA_MISSING
+#endif
+	bx	lr
+endfunc check_errata_cve_2018_3639
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A57.
 	 * Shall clobber: r0-r6
@@ -392,6 +401,14 @@
 	bl	errata_a57_859972_wa
 #endif
 
+#if WORKAROUND_CVE_2018_3639
+	ldcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_STORE
+	stcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	isb
+	dsb	sy
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * ---------------------------------------------
@@ -525,6 +542,7 @@
 	report_errata ERRATA_A57_833471, cortex_a57, 833471
 	report_errata ERRATA_A57_859972, cortex_a57, 859972
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
 
 	pop	{r12, lr}
 	bx	lr
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
index 56e91f5..3bc3388 100644
--- a/lib/cpus/aarch32/cortex_a72.S
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -92,6 +92,15 @@
 	bx	lr
 endfunc check_errata_cve_2017_5715
 
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	r0, #ERRATA_APPLIES
+#else
+	mov	r0, #ERRATA_MISSING
+#endif
+	bx	lr
+endfunc check_errata_cve_2018_3639
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A72.
 	 * -------------------------------------------------
@@ -105,6 +114,15 @@
 	mov	r0, r4
 	bl	errata_a72_859971_wa
 #endif
+
+#if WORKAROUND_CVE_2018_3639
+	ldcopr16	r0, r1, CORTEX_A72_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A72_CPUACTLR_DIS_LOAD_PASS_STORE
+	stcopr16	r0, r1, CORTEX_A72_CPUACTLR
+	isb
+	dsb	sy
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * ---------------------------------------------
@@ -241,6 +259,7 @@
 	 */
 	report_errata ERRATA_A72_859971, cortex_a72, 859971
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
 
 	pop	{r12, lr}
 	bx	lr
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index 4d072e1..07fadd1 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -337,6 +337,15 @@
 	ret
 endfunc check_errata_cve_2017_5715
 
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2018_3639
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A57.
 	 * Shall clobber: x0-x19
@@ -393,10 +402,18 @@
 #endif
 
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
-	adr	x0, workaround_mmu_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_mmu_vbar
 	msr	vbar_el3, x0
 #endif
 
+#if WORKAROUND_CVE_2018_3639
+	mrs	x0, CORTEX_A57_CPUACTLR_EL1
+	orr	x0, x0, #CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_STORE
+	msr	CORTEX_A57_CPUACTLR_EL1, x0
+	isb
+	dsb	sy
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * ---------------------------------------------
@@ -528,6 +545,7 @@
 	report_errata ERRATA_A57_833471, cortex_a57, 833471
 	report_errata ERRATA_A57_859972, cortex_a57, 859972
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -555,8 +573,9 @@
 	ret
 endfunc cortex_a57_cpu_reg_dump
 
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a57, CORTEX_A57_MIDR, \
+declare_cpu_ops_wa cortex_a57, CORTEX_A57_MIDR, \
 	cortex_a57_reset_func, \
 	check_errata_cve_2017_5715, \
+	CPU_NO_EXTRA2_FUNC, \
 	cortex_a57_core_pwr_dwn, \
 	cortex_a57_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 29fa77b..bb9381d 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -110,6 +110,15 @@
 	ret
 endfunc check_errata_cve_2017_5715
 
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2018_3639
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A72.
 	 * -------------------------------------------------
@@ -126,11 +135,19 @@
 
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
 	cpu_check_csv2	x0, 1f
-	adr	x0, workaround_mmu_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_mmu_vbar
 	msr	vbar_el3, x0
 1:
 #endif
 
+#if WORKAROUND_CVE_2018_3639
+	mrs	x0, CORTEX_A72_CPUACTLR_EL1
+	orr	x0, x0, #CORTEX_A72_CPUACTLR_EL1_DIS_LOAD_PASS_STORE
+	msr	CORTEX_A72_CPUACTLR_EL1, x0
+	isb
+	dsb	sy
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * ---------------------------------------------
@@ -265,6 +282,7 @@
 	 */
 	report_errata ERRATA_A72_859971, cortex_a72, 859971
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -292,8 +310,9 @@
 	ret
 endfunc cortex_a72_cpu_reg_dump
 
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a72, CORTEX_A72_MIDR, \
+declare_cpu_ops_wa cortex_a72, CORTEX_A72_MIDR, \
 	cortex_a72_reset_func, \
 	check_errata_cve_2017_5715, \
+	CPU_NO_EXTRA2_FUNC, \
 	cortex_a72_core_pwr_dwn, \
 	cortex_a72_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index 0a961ea..d595f12 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -38,11 +38,18 @@
 func cortex_a73_reset_func
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
 	cpu_check_csv2	x0, 1f
-	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_bpiall_vbar
 	msr	vbar_el3, x0
 1:
 #endif
 
+#if WORKAROUND_CVE_2018_3639
+	mrs	x0, CORTEX_A73_IMP_DEF_REG1
+	orr	x0, x0, #CORTEX_A73_IMP_DEF_REG1_DISABLE_LOAD_PASS_STORE
+	msr	CORTEX_A73_IMP_DEF_REG1, x0
+	isb
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * Clobbers : x0
@@ -129,6 +136,15 @@
 	ret
 endfunc check_errata_cve_2017_5715
 
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2018_3639
+
 #if REPORT_ERRATA
 /*
  * Errata printing function for Cortex A73. Must follow AAPCS.
@@ -144,6 +160,7 @@
 	 * checking functions of each errata.
 	 */
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a73, cve_2017_5715
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a73, cve_2018_3639
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -170,8 +187,9 @@
 	ret
 endfunc cortex_a73_cpu_reg_dump
 
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a73, CORTEX_A73_MIDR, \
+declare_cpu_ops_wa cortex_a73, CORTEX_A73_MIDR, \
 	cortex_a73_reset_func, \
 	check_errata_cve_2017_5715, \
+	CPU_NO_EXTRA2_FUNC, \
 	cortex_a73_core_pwr_dwn, \
 	cortex_a73_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 288f5af..20ec32c 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -13,11 +13,18 @@
 func cortex_a75_reset_func
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
 	cpu_check_csv2	x0, 1f
-	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_bpiall_vbar
 	msr	vbar_el3, x0
 1:
 #endif
 
+#if WORKAROUND_CVE_2018_3639
+	mrs	x0, CORTEX_A75_CPUACTLR_EL1
+	orr	x0, x0, #CORTEX_A75_CPUACTLR_EL1_DISABLE_LOAD_PASS_STORE
+	msr	CORTEX_A75_CPUACTLR_EL1, x0
+	isb
+#endif
+
 #if ENABLE_AMU
 	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
 	mrs	x0, actlr_el3
@@ -57,6 +64,15 @@
 	ret
 endfunc check_errata_cve_2017_5715
 
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2018_3639
+
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
@@ -88,6 +104,7 @@
 	 * checking functions of each errata.
 	 */
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a75, cve_2017_5715
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a75, cve_2018_3639
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -113,7 +130,8 @@
 	ret
 endfunc cortex_a75_cpu_reg_dump
 
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a75, CORTEX_A75_MIDR, \
+declare_cpu_ops_wa cortex_a75, CORTEX_A75_MIDR, \
 	cortex_a75_reset_func, \
 	check_errata_cve_2017_5715, \
+	CPU_NO_EXTRA2_FUNC, \
 	cortex_a75_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 9f13ed2..69ece8f 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -285,7 +285,7 @@
 #endif
 
 /*
- * int check_workaround_cve_2017_5715(void);
+ * int check_wa_cve_2017_5715(void);
  *
  * This function returns:
  *  - ERRATA_APPLIES when firmware mitigation is required.
@@ -296,8 +296,8 @@
  * NOTE: Must be called only after cpu_ops have been initialized
  *       in per-CPU data.
  */
-	.globl	check_workaround_cve_2017_5715
-func check_workaround_cve_2017_5715
+	.globl	check_wa_cve_2017_5715
+func check_wa_cve_2017_5715
 	mrs	x0, tpidr_el3
 #if ENABLE_ASSERTIONS
 	cmp	x0, #0
@@ -315,4 +315,28 @@
 1:
 	mov	x0, #ERRATA_NOT_APPLIES
 	ret
-endfunc check_workaround_cve_2017_5715
+endfunc check_wa_cve_2017_5715
+
+/*
+ * void *wa_cve_2018_3639_get_disable_ptr(void);
+ *
+ * Returns a function pointer which is used to disable mitigation
+ * for CVE-2018-3639.
+ * The function pointer is only returned on cores that employ
+ * dynamic mitigation.  If the core uses static mitigation or is
+ * unaffected by CVE-2018-3639, this function returns NULL.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ *       in per-CPU data.
+ */
+	.globl	wa_cve_2018_3639_get_disable_ptr
+func wa_cve_2018_3639_get_disable_ptr
+	mrs	x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif
+	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
+	ret
+endfunc wa_cve_2018_3639_get_disable_ptr
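A C-side sketch of how a caller might use the helper above, with the
declaration coming from the new ``wa_cve_2018_3639.h`` header (the real SMC
handler that consumes this pointer is outside this patch)::

    #include <stddef.h>
    #include <wa_cve_2018_3639.h>

    static int cve_2018_3639_is_dynamic(void)
    {
        /*
         * Non-NULL: this core employs dynamic mitigation and the
         * returned function can be invoked to disable it.
         * NULL: static mitigation, or the core is unaffected.
         */
        return wa_cve_2018_3639_get_disable_ptr() != NULL;
    }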
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
similarity index 67%
rename from lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
rename to lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
index cd82497..8437155 100644
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
@@ -9,13 +9,13 @@
 #include <asm_macros.S>
 #include <context.h>
 
-	.globl	workaround_bpiall_vbar0_runtime_exceptions
+	.globl	wa_cve_2017_5715_bpiall_vbar
 
 #define EMIT_BPIALL		0xee070fd5
 #define EMIT_SMC		0xe1600070
 #define ESR_EL3_A64_SMC0	0x5e000000
 
-	.macro	enter_workaround _from_vector
+	.macro	apply_cve_2017_5715_wa _from_vector
 	/*
 	 * Save register state to enable a call to AArch32 S-EL1 and return
 	 * Identify the original calling vector in w2 (==_from_vector)
@@ -66,7 +66,7 @@
 	movz	w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)
 
 	/* Switch EL3 exception vectors while the workaround is executing. */
-	adr	x9, workaround_bpiall_vbar1_runtime_exceptions
+	adr	x9, wa_cve_2017_5715_bpiall_ret_vbar
 
 	/* Setup SCTLR_EL1 with MMU off and I$ on */
 	ldr	x10, stub_sel1_sctlr
@@ -93,13 +93,13 @@
 	 * is not enabled, the existing runtime exception vector table is used.
 	 * ---------------------------------------------------------------------
 	 */
-vector_base workaround_bpiall_vbar0_runtime_exceptions
+vector_base wa_cve_2017_5715_bpiall_vbar
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_EL0 : 0x0 - 0x200
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
+vector_entry bpiall_sync_exception_sp_el0
 	b	sync_exception_sp_el0
 	nop	/* to force 8 byte alignment for the following stub */
 
@@ -114,79 +114,79 @@
 	.word	EMIT_BPIALL
 	.word	EMIT_SMC
 
-	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
+	check_vector_size bpiall_sync_exception_sp_el0
 
-vector_entry workaround_bpiall_vbar0_irq_sp_el0
+vector_entry bpiall_irq_sp_el0
 	b	irq_sp_el0
-	check_vector_size workaround_bpiall_vbar0_irq_sp_el0
+	check_vector_size bpiall_irq_sp_el0
 
-vector_entry workaround_bpiall_vbar0_fiq_sp_el0
+vector_entry bpiall_fiq_sp_el0
 	b	fiq_sp_el0
-	check_vector_size workaround_bpiall_vbar0_fiq_sp_el0
+	check_vector_size bpiall_fiq_sp_el0
 
-vector_entry workaround_bpiall_vbar0_serror_sp_el0
+vector_entry bpiall_serror_sp_el0
 	b	serror_sp_el0
-	check_vector_size workaround_bpiall_vbar0_serror_sp_el0
+	check_vector_size bpiall_serror_sp_el0
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_ELx: 0x200 - 0x400
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx
+vector_entry bpiall_sync_exception_sp_elx
 	b	sync_exception_sp_elx
-	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx
+	check_vector_size bpiall_sync_exception_sp_elx
 
-vector_entry workaround_bpiall_vbar0_irq_sp_elx
+vector_entry bpiall_irq_sp_elx
 	b	irq_sp_elx
-	check_vector_size workaround_bpiall_vbar0_irq_sp_elx
+	check_vector_size bpiall_irq_sp_elx
 
-vector_entry workaround_bpiall_vbar0_fiq_sp_elx
+vector_entry bpiall_fiq_sp_elx
 	b	fiq_sp_elx
-	check_vector_size workaround_bpiall_vbar0_fiq_sp_elx
+	check_vector_size bpiall_fiq_sp_elx
 
-vector_entry workaround_bpiall_vbar0_serror_sp_elx
+vector_entry bpiall_serror_sp_elx
 	b	serror_sp_elx
-	check_vector_size workaround_bpiall_vbar0_serror_sp_elx
+	check_vector_size bpiall_serror_sp_elx
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch64 : 0x400 - 0x600
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
-	enter_workaround 1
-	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64
+vector_entry bpiall_sync_exception_aarch64
+	apply_cve_2017_5715_wa 1
+	check_vector_size bpiall_sync_exception_aarch64
 
-vector_entry workaround_bpiall_vbar0_irq_aarch64
-	enter_workaround 2
-	check_vector_size workaround_bpiall_vbar0_irq_aarch64
+vector_entry bpiall_irq_aarch64
+	apply_cve_2017_5715_wa 2
+	check_vector_size bpiall_irq_aarch64
 
-vector_entry workaround_bpiall_vbar0_fiq_aarch64
-	enter_workaround 4
-	check_vector_size workaround_bpiall_vbar0_fiq_aarch64
+vector_entry bpiall_fiq_aarch64
+	apply_cve_2017_5715_wa 4
+	check_vector_size bpiall_fiq_aarch64
 
-vector_entry workaround_bpiall_vbar0_serror_aarch64
-	enter_workaround 8
-	check_vector_size workaround_bpiall_vbar0_serror_aarch64
+vector_entry bpiall_serror_aarch64
+	apply_cve_2017_5715_wa 8
+	check_vector_size bpiall_serror_aarch64
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch32 : 0x600 - 0x800
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
-	enter_workaround 1
-	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32
+vector_entry bpiall_sync_exception_aarch32
+	apply_cve_2017_5715_wa 1
+	check_vector_size bpiall_sync_exception_aarch32
 
-vector_entry workaround_bpiall_vbar0_irq_aarch32
-	enter_workaround 2
-	check_vector_size workaround_bpiall_vbar0_irq_aarch32
+vector_entry bpiall_irq_aarch32
+	apply_cve_2017_5715_wa 2
+	check_vector_size bpiall_irq_aarch32
 
-vector_entry workaround_bpiall_vbar0_fiq_aarch32
-	enter_workaround 4
-	check_vector_size workaround_bpiall_vbar0_fiq_aarch32
+vector_entry bpiall_fiq_aarch32
+	apply_cve_2017_5715_wa 4
+	check_vector_size bpiall_fiq_aarch32
 
-vector_entry workaround_bpiall_vbar0_serror_aarch32
-	enter_workaround 8
-	check_vector_size workaround_bpiall_vbar0_serror_aarch32
+vector_entry bpiall_serror_aarch32
+	apply_cve_2017_5715_wa 8
+	check_vector_size bpiall_serror_aarch32
 
 	/* ---------------------------------------------------------------------
 	 * This vector table is used while the workaround is executing.  It
@@ -195,73 +195,73 @@
 	 * EL3 state before proceeding with the normal runtime exception vector.
 	 * ---------------------------------------------------------------------
 	 */
-vector_base workaround_bpiall_vbar1_runtime_exceptions
+vector_base wa_cve_2017_5715_bpiall_ret_vbar
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0
+vector_entry bpiall_ret_sync_exception_sp_el0
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0
+	check_vector_size bpiall_ret_sync_exception_sp_el0
 
-vector_entry workaround_bpiall_vbar1_irq_sp_el0
+vector_entry bpiall_ret_irq_sp_el0
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_irq_sp_el0
+	check_vector_size bpiall_ret_irq_sp_el0
 
-vector_entry workaround_bpiall_vbar1_fiq_sp_el0
+vector_entry bpiall_ret_fiq_sp_el0
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_fiq_sp_el0
+	check_vector_size bpiall_ret_fiq_sp_el0
 
-vector_entry workaround_bpiall_vbar1_serror_sp_el0
+vector_entry bpiall_ret_serror_sp_el0
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_serror_sp_el0
+	check_vector_size bpiall_ret_serror_sp_el0
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx
+vector_entry bpiall_ret_sync_exception_sp_elx
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx
+	check_vector_size bpiall_ret_sync_exception_sp_elx
 
-vector_entry workaround_bpiall_vbar1_irq_sp_elx
+vector_entry bpiall_ret_irq_sp_elx
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_irq_sp_elx
+	check_vector_size bpiall_ret_irq_sp_elx
 
-vector_entry workaround_bpiall_vbar1_fiq_sp_elx
+vector_entry bpiall_ret_fiq_sp_elx
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_fiq_sp_elx
+	check_vector_size bpiall_ret_fiq_sp_elx
 
-vector_entry workaround_bpiall_vbar1_serror_sp_elx
+vector_entry bpiall_ret_serror_sp_elx
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_serror_sp_elx
+	check_vector_size bpiall_ret_serror_sp_elx
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar1_sync_exception_aarch64
+vector_entry bpiall_ret_sync_exception_aarch64
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64
+	check_vector_size bpiall_ret_sync_exception_aarch64
 
-vector_entry workaround_bpiall_vbar1_irq_aarch64
+vector_entry bpiall_ret_irq_aarch64
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_irq_aarch64
+	check_vector_size bpiall_ret_irq_aarch64
 
-vector_entry workaround_bpiall_vbar1_fiq_aarch64
+vector_entry bpiall_ret_fiq_aarch64
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_fiq_aarch64
+	check_vector_size bpiall_ret_fiq_aarch64
 
-vector_entry workaround_bpiall_vbar1_serror_aarch64
+vector_entry bpiall_ret_serror_aarch64
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_serror_aarch64
+	check_vector_size bpiall_ret_serror_aarch64
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch32 : 0x600 - 0x800
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
+vector_entry bpiall_ret_sync_exception_aarch32
 	/*
 	 * w2 indicates which SEL1 stub was run and thus which original vector was used
 	 * w3-w6 contain saved system register state (esr_el3 in w3)
@@ -281,7 +281,7 @@
 	 * to workaround entry table in preparation for subsequent
 	 * Sync/IRQ/FIQ/SError exceptions.
 	 */
-	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_bpiall_vbar
 	msr	vbar_el3, x0
 
 	/*
@@ -324,34 +324,34 @@
 1:
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	sync_exception_aarch64
-	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
+	check_vector_size bpiall_ret_sync_exception_aarch32
 
-vector_entry workaround_bpiall_vbar1_irq_aarch32
+vector_entry bpiall_ret_irq_aarch32
 	b	report_unhandled_interrupt
 
 	/*
 	 * Post-workaround fan-out for non-sync exceptions
 	 */
 workaround_not_sync:
-	tbnz	w2, #3, workaround_bpiall_vbar1_serror
-	tbnz	w2, #2, workaround_bpiall_vbar1_fiq
+	tbnz	w2, #3, bpiall_ret_serror
+	tbnz	w2, #2, bpiall_ret_fiq
 	/* IRQ */
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	irq_aarch64
 
-workaround_bpiall_vbar1_fiq:
+bpiall_ret_fiq:
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	fiq_aarch64
 
-workaround_bpiall_vbar1_serror:
+bpiall_ret_serror:
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	serror_aarch64
-	check_vector_size workaround_bpiall_vbar1_irq_aarch32
+	check_vector_size bpiall_ret_irq_aarch32
 
-vector_entry workaround_bpiall_vbar1_fiq_aarch32
+vector_entry bpiall_ret_fiq_aarch32
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_fiq_aarch32
+	check_vector_size bpiall_ret_fiq_aarch32
 
-vector_entry workaround_bpiall_vbar1_serror_aarch32
+vector_entry bpiall_ret_serror_aarch32
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_serror_aarch32
+	check_vector_size bpiall_ret_serror_aarch32
diff --git a/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
new file mode 100644
index 0000000..039e373
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arm_arch_svc.h>
+#include <asm_macros.S>
+#include <context.h>
+
+	.globl	wa_cve_2017_5715_mmu_vbar
+
+#define ESR_EL3_A64_SMC0	0x5e000000
+
+vector_base wa_cve_2017_5715_mmu_vbar
+
+	.macro	apply_cve_2017_5715_wa _is_sync_exception
+	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	mrs	x1, sctlr_el3
+	/* Disable MMU */
+	bic	x1, x1, #SCTLR_M_BIT
+	msr	sctlr_el3, x1
+	isb
+	/* Enable MMU */
+	orr	x1, x1, #SCTLR_M_BIT
+	msr	sctlr_el3, x1
+	/*
+	 * Defer ISB to avoid synchronizing twice in case we hit
+	 * the workaround SMC call which will implicitly synchronize
+	 * because of the ERET instruction.
+	 */
+
+	/*
+	 * Ensure SMC is coming from A64 state on #0
+	 * with W0 = SMCCC_ARCH_WORKAROUND_1
+	 *
+	 * This sequence evaluates as:
+	 *    (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing use of a single branch operation
+	 */
+	.if \_is_sync_exception
+		orr	w1, wzr, #SMCCC_ARCH_WORKAROUND_1
+		cmp	w0, w1
+		mrs	x0, esr_el3
+		mov_imm	w1, ESR_EL3_A64_SMC0
+		ccmp	w0, w1, #0, eq
+		/* Static predictor will predict a fall through */
+		bne	1f
+		eret
+1:
+	.endif
+
+	/*
+	 * Synchronize now to enable the MMU.  This is required
+	 * to ensure the load pair below reads the data stored earlier.
+	 */
+	isb
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	.endm
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry mmu_sync_exception_sp_el0
+	b	sync_exception_sp_el0
+	check_vector_size mmu_sync_exception_sp_el0
+
+vector_entry mmu_irq_sp_el0
+	b	irq_sp_el0
+	check_vector_size mmu_irq_sp_el0
+
+vector_entry mmu_fiq_sp_el0
+	b	fiq_sp_el0
+	check_vector_size mmu_fiq_sp_el0
+
+vector_entry mmu_serror_sp_el0
+	b	serror_sp_el0
+	check_vector_size mmu_serror_sp_el0
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry mmu_sync_exception_sp_elx
+	b	sync_exception_sp_elx
+	check_vector_size mmu_sync_exception_sp_elx
+
+vector_entry mmu_irq_sp_elx
+	b	irq_sp_elx
+	check_vector_size mmu_irq_sp_elx
+
+vector_entry mmu_fiq_sp_elx
+	b	fiq_sp_elx
+	check_vector_size mmu_fiq_sp_elx
+
+vector_entry mmu_serror_sp_elx
+	b	serror_sp_elx
+	check_vector_size mmu_serror_sp_elx
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry mmu_sync_exception_aarch64
+	apply_cve_2017_5715_wa _is_sync_exception=1
+	b	sync_exception_aarch64
+	check_vector_size mmu_sync_exception_aarch64
+
+vector_entry mmu_irq_aarch64
+	apply_cve_2017_5715_wa _is_sync_exception=0
+	b	irq_aarch64
+	check_vector_size mmu_irq_aarch64
+
+vector_entry mmu_fiq_aarch64
+	apply_cve_2017_5715_wa _is_sync_exception=0
+	b	fiq_aarch64
+	check_vector_size mmu_fiq_aarch64
+
+vector_entry mmu_serror_aarch64
+	apply_cve_2017_5715_wa _is_sync_exception=0
+	b	serror_aarch64
+	check_vector_size mmu_serror_aarch64
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x800
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry mmu_sync_exception_aarch32
+	apply_cve_2017_5715_wa _is_sync_exception=1
+	b	sync_exception_aarch32
+	check_vector_size mmu_sync_exception_aarch32
+
+vector_entry mmu_irq_aarch32
+	apply_cve_2017_5715_wa _is_sync_exception=0
+	b	irq_aarch32
+	check_vector_size mmu_irq_aarch32
+
+vector_entry mmu_fiq_aarch32
+	apply_cve_2017_5715_wa _is_sync_exception=0
+	b	fiq_aarch32
+	check_vector_size mmu_fiq_aarch32
+
+vector_entry mmu_serror_aarch32
+	apply_cve_2017_5715_wa _is_sync_exception=0
+	b	serror_aarch32
+	check_vector_size mmu_serror_aarch32
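The apply_cve_2017_5715_wa fast path above folds two comparisons into a single
branch via ccmp. A C model of the predicate it computes, as a sketch only
(0x80008000 is the SMCCC 1.1 function ID for SMCCC_ARCH_WORKAROUND_1; the
helper name is illustrative, not part of this patch):

    #include <stdint.h>

    #define SMCCC_ARCH_WORKAROUND_1	0x80008000U	/* SMCCC 1.1 function ID */
    #define ESR_EL3_A64_SMC0		0x5e000000U	/* EC = SMC from AArch64, ISS = 0 */

    /* Non-zero when the sync exception is the workaround SMC itself, so the
     * vector can ERET immediately instead of entering the full EL3 handler. */
    static int is_workaround_smc(uint32_t w0, uint32_t esr_el3)
    {
        /* ccmp compares ESR only if W0 already matched, otherwise it forces NE */
        return (w0 == SMCCC_ARCH_WORKAROUND_1) && (esr_el3 == ESR_EL3_A64_SMC0);
    }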
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
deleted file mode 100644
index b24b620..0000000
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <arm_arch_svc.h>
-#include <asm_macros.S>
-#include <context.h>
-
-	.globl	workaround_mmu_runtime_exceptions
-
-#define ESR_EL3_A64_SMC0	0x5e000000
-
-vector_base workaround_mmu_runtime_exceptions
-
-	.macro	apply_workaround _is_sync_exception
-	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	mrs	x1, sctlr_el3
-	/* Disable MMU */
-	bic	x1, x1, #SCTLR_M_BIT
-	msr	sctlr_el3, x1
-	isb
-	/* Enable MMU */
-	orr	x1, x1, #SCTLR_M_BIT
-	msr	sctlr_el3, x1
-	/*
-	 * Defer ISB to avoid synchronizing twice in case we hit
-	 * the workaround SMC call which will implicitly synchronize
-	 * because of the ERET instruction.
-	 */
-
-	/*
-	 * Ensure SMC is coming from A64 state on #0
-	 * with W0 = SMCCC_ARCH_WORKAROUND_1
-	 *
-	 * This sequence evaluates as:
-	 *    (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
-	 * allowing use of a single branch operation
-	 */
-	.if \_is_sync_exception
-		orr	w1, wzr, #SMCCC_ARCH_WORKAROUND_1
-		cmp	w0, w1
-		mrs	x0, esr_el3
-		mov_imm	w1, ESR_EL3_A64_SMC0
-		ccmp	w0, w1, #0, eq
-		/* Static predictor will predict a fall through */
-		bne	1f
-		eret
-1:
-	.endif
-
-	/*
-	 * Synchronize now to enable the MMU.  This is required
-	 * to ensure the load pair below reads the data stored earlier.
-	 */
-	isb
-	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	.endm
-
-	/* ---------------------------------------------------------------------
-	 * Current EL with SP_EL0 : 0x0 - 0x200
-	 * ---------------------------------------------------------------------
-	 */
-vector_entry workaround_mmu_sync_exception_sp_el0
-	b	sync_exception_sp_el0
-	check_vector_size workaround_mmu_sync_exception_sp_el0
-
-vector_entry workaround_mmu_irq_sp_el0
-	b	irq_sp_el0
-	check_vector_size workaround_mmu_irq_sp_el0
-
-vector_entry workaround_mmu_fiq_sp_el0
-	b	fiq_sp_el0
-	check_vector_size workaround_mmu_fiq_sp_el0
-
-vector_entry workaround_mmu_serror_sp_el0
-	b	serror_sp_el0
-	check_vector_size workaround_mmu_serror_sp_el0
-
-	/* ---------------------------------------------------------------------
-	 * Current EL with SP_ELx: 0x200 - 0x400
-	 * ---------------------------------------------------------------------
-	 */
-vector_entry workaround_mmu_sync_exception_sp_elx
-	b	sync_exception_sp_elx
-	check_vector_size workaround_mmu_sync_exception_sp_elx
-
-vector_entry workaround_mmu_irq_sp_elx
-	b	irq_sp_elx
-	check_vector_size workaround_mmu_irq_sp_elx
-
-vector_entry workaround_mmu_fiq_sp_elx
-	b	fiq_sp_elx
-	check_vector_size workaround_mmu_fiq_sp_elx
-
-vector_entry workaround_mmu_serror_sp_elx
-	b	serror_sp_elx
-	check_vector_size workaround_mmu_serror_sp_elx
-
-	/* ---------------------------------------------------------------------
-	 * Lower EL using AArch64 : 0x400 - 0x600
-	 * ---------------------------------------------------------------------
-	 */
-vector_entry workaround_mmu_sync_exception_aarch64
-	apply_workaround _is_sync_exception=1
-	b	sync_exception_aarch64
-	check_vector_size workaround_mmu_sync_exception_aarch64
-
-vector_entry workaround_mmu_irq_aarch64
-	apply_workaround _is_sync_exception=0
-	b	irq_aarch64
-	check_vector_size workaround_mmu_irq_aarch64
-
-vector_entry workaround_mmu_fiq_aarch64
-	apply_workaround _is_sync_exception=0
-	b	fiq_aarch64
-	check_vector_size workaround_mmu_fiq_aarch64
-
-vector_entry workaround_mmu_serror_aarch64
-	apply_workaround _is_sync_exception=0
-	b	serror_aarch64
-	check_vector_size workaround_mmu_serror_aarch64
-
-	/* ---------------------------------------------------------------------
-	 * Lower EL using AArch32 : 0x600 - 0x800
-	 * ---------------------------------------------------------------------
-	 */
-vector_entry workaround_mmu_sync_exception_aarch32
-	apply_workaround _is_sync_exception=1
-	b	sync_exception_aarch32
-	check_vector_size workaround_mmu_sync_exception_aarch32
-
-vector_entry workaround_mmu_irq_aarch32
-	apply_workaround _is_sync_exception=0
-	b	irq_aarch32
-	check_vector_size workaround_mmu_irq_aarch32
-
-vector_entry workaround_mmu_fiq_aarch32
-	apply_workaround _is_sync_exception=0
-	b	fiq_aarch32
-	check_vector_size workaround_mmu_fiq_aarch32
-
-vector_entry workaround_mmu_serror_aarch32
-	apply_workaround _is_sync_exception=0
-	b	serror_aarch32
-	check_vector_size workaround_mmu_serror_aarch32
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 3ba8c1f..434c13e 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -17,6 +17,8 @@
 A57_DISABLE_NON_TEMPORAL_HINT	?=1
 
 WORKAROUND_CVE_2017_5715	?=1
+WORKAROUND_CVE_2018_3639	?=1
+DYNAMIC_WORKAROUND_CVE_2018_3639	?=0
 
 # Process SKIP_A57_L1_FLUSH_PWR_DWN flag
 $(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
@@ -34,6 +36,19 @@
 $(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715))
 $(eval $(call add_define,WORKAROUND_CVE_2017_5715))
 
+# Process WORKAROUND_CVE_2018_3639 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2018_3639))
+$(eval $(call add_define,WORKAROUND_CVE_2018_3639))
+
+$(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639))
+$(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639))
+
+ifneq (${DYNAMIC_WORKAROUND_CVE_2018_3639},0)
+    ifeq (${WORKAROUND_CVE_2018_3639},0)
+        $(error "Error: WORKAROUND_CVE_2018_3639 must be 1 if DYNAMIC_WORKAROUND_CVE_2018_3639 is 1")
+    endif
+endif
+
 # CPU Errata Build flags.
 # These should be enabled by the platform if the erratum workaround needs to be
 # applied.
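The make-level dependency enforced above has a straightforward C-level
analogue once add_define has emitted both macros; a platform could mirror the
guard at compile time (illustrative sketch, not part of the tree):

    #if DYNAMIC_WORKAROUND_CVE_2018_3639 && !WORKAROUND_CVE_2018_3639
    #error "WORKAROUND_CVE_2018_3639 must be 1 when DYNAMIC_WORKAROUND_CVE_2018_3639 is 1"
    #endif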
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 121ca4d..707e6db 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -404,6 +404,15 @@
 	msr	spsr_el3, x16
 	msr	elr_el3, x17
 
+#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
+	/* Restore mitigation state as it was on entry to EL3 */
+	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
+	cmp	x17, xzr
+	beq	1f
+	blr	x17
+1:
+#endif
+
 	/* Restore saved general purpose registers and return */
 	b	restore_gp_registers_eret
 endfunc el3_exit
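The restore path added to el3_exit treats CTX_CVE_2018_3639_DISABLE as either
zero or the address of a CPU-specific routine that switches the mitigation
off. A hedged C view of that contract (names approximate context.h and are
kept only for illustration):

    #include <stdint.h>

    /* One 64-bit slot in the per-CPU EL3 context (CTX_CVE_2018_3639_*). */
    typedef struct cve_2018_3639 {
        uint64_t disable;	/* 0: leave the mitigation as it is on ERET;
    				 * else: routine to blr to just before ERET */
    } cve_2018_3639_t;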
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index c02831a..f807dc6 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -225,12 +225,6 @@
     override BL1_SOURCES =
 endif
 
-ifeq (${ENABLE_SPM},1)
-ifneq (${ARM_BL31_IN_DRAM},1)
-        $(error "Error: SPM needs BL31 to be located in DRAM.")
-endif
-endif
-
 include plat/arm/board/common/board_common.mk
 include plat/arm/common/arm_common.mk
 
diff --git a/plat/arm/board/juno/include/platform_def.h b/plat/arm/board/juno/include/platform_def.h
index e616e1f..80d4ba8 100644
--- a/plat/arm/board/juno/include/platform_def.h
+++ b/plat/arm/board/juno/include/platform_def.h
@@ -117,7 +117,7 @@
  * plus a little space for growth.
  */
 #if TRUSTED_BOARD_BOOT
-# define PLAT_ARM_MAX_BL1_RW_SIZE	0xA000
+# define PLAT_ARM_MAX_BL1_RW_SIZE	0xB000
 #else
 # define PLAT_ARM_MAX_BL1_RW_SIZE	0x6000
 #endif
@@ -128,7 +128,7 @@
  */
 #if TRUSTED_BOARD_BOOT
 #if TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_RSA_AND_ECDSA
-# define PLAT_ARM_MAX_BL2_SIZE		0x20000
+# define PLAT_ARM_MAX_BL2_SIZE		0x1F000
 #elif TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_ECDSA
 # define PLAT_ARM_MAX_BL2_SIZE		0x1D000
 #else
@@ -139,22 +139,21 @@
 #endif
 
 /*
- * PLAT_ARM_MAX_BL31_SIZE is calculated using the current BL31 debug size plus a
- * little space for growth.
- * SCP_BL2 image is loaded into the space BL31 -> BL1_RW_BASE.
- * For TBB use case, PLAT_ARM_MAX_BL1_RW_SIZE has been increased and therefore
- * PLAT_ARM_MAX_BL31_SIZE has been increased to ensure SCP_BL2 has the same
- * space available.
+ * Since BL31 NOBITS overlays BL2 and BL1-RW, PLAT_ARM_MAX_BL31_SIZE is
+ * calculated using the current BL31 PROGBITS debug size plus the sizes of
+ * BL2 and BL1-RW.  The SCP_BL2 image is loaded into the space from BL31 to
+ * BL2_BASE, so that region must also be >= PLAT_CSS_MAX_SCP_BL2_SIZE.
  */
-#define PLAT_ARM_MAX_BL31_SIZE		0x1E000
+#define PLAT_ARM_MAX_BL31_SIZE		0x3E000
 
 #if JUNO_AARCH32_EL3_RUNTIME
 /*
- * PLAT_ARM_MAX_BL32_SIZE is calculated for SP_MIN as the AArch32 Secure
- * Payload. We also need to take care of SCP_BL2 size as well, as the SCP_BL2
- * is loaded into the space BL32 -> BL1_RW_BASE
+ * Since BL32 NOBITS overlays BL2 and BL1-RW, PLAT_ARM_MAX_BL32_SIZE is
+ * calculated using the current BL32 PROGBITS debug size plus the sizes of
+ * BL2 and BL1-RW.  The SCP_BL2 image is loaded into the space from BL32 to
+ * BL2_BASE, so that region must also be >= PLAT_CSS_MAX_SCP_BL2_SIZE.
  */
-# define PLAT_ARM_MAX_BL32_SIZE		0x1E000
+#define PLAT_ARM_MAX_BL32_SIZE		0x3E000
 #endif
 
 /*
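A worked check of the new Juno budget using only numbers from this patch: with
TBB and RSA_AND_ECDSA, BL2 is 0x1F000 and BL1-RW is 0xB000, i.e. 0x2A000
together, which leaves 0x3E000 - 0x2A000 = 0x14000 for BL31 PROGBITS (the
0x14000 figure is derived here, not a constant from the tree):

    /* Compile-time sanity check of the arithmetic above (illustrative). */
    #if (0x1F000 + 0xB000 + 0x14000) != 0x3E000
    #error "Juno BL31 size budget does not add up"
    #endif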
diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c
index d2da54d..fd7a9e9 100644
--- a/plat/arm/common/arm_bl2_setup.c
+++ b/plat/arm/common/arm_bl2_setup.c
@@ -25,18 +25,10 @@
 static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
 
 /*
- * Check that BL2_BASE is atleast a page over ARM_BL_RAM_BASE. The page is for
- * `meminfo_t` data structure and TB_FW_CONFIG passed from BL1. Not needed
- * when BL2 is compiled for BL_AT_EL3 as BL2 doesn't need any info from BL1 and
- * BL2 is loaded at base of usable SRAM.
+ * Check that BL2_BASE is at or above ARM_TB_FW_CONFIG_LIMIT. This reserved
+ * page holds the `meminfo_t` data structure and the fw_configs passed from BL1.
  */
-#if BL2_AT_EL3
-#define BL1_MEMINFO_OFFSET	0x0
-#else
-#define BL1_MEMINFO_OFFSET	PAGE_SIZE
-#endif
-
-CASSERT(BL2_BASE >= (ARM_BL_RAM_BASE + BL1_MEMINFO_OFFSET), assert_bl2_base_overflows);
+CASSERT(BL2_BASE >= ARM_TB_FW_CONFIG_LIMIT, assert_bl2_base_overflows);
 
 /* Weak definitions may be overridden in specific ARM standard platform */
 #pragma weak bl2_early_platform_setup2
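CASSERT turns a violated layout invariant into a build failure rather than a
boot-time surprise. A minimal sketch of the idiom, assuming TF-A's
typedef-based definition (the real macro lives in include/lib/cassert.h):

    /* A negatively-sized array type is ill-formed, so a false condition
     * fails compilation; the message doubles as the typedef name. */
    #define CASSERT(cond, assert_msg)	typedef char assert_msg[(cond) ? 1 : -1]

    /* The assert from this hunk; both symbols come from platform headers. */
    CASSERT(BL2_BASE >= ARM_TB_FW_CONFIG_LIMIT, assert_bl2_base_overflows);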
diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c
index 551e700..46f7ae0 100644
--- a/plat/arm/common/arm_bl31_setup.c
+++ b/plat/arm/common/arm_bl31_setup.c
@@ -25,6 +25,11 @@
 static entry_point_info_t bl32_image_ep_info;
 static entry_point_info_t bl33_image_ep_info;
 
+/*
+ * Check that BL31_BASE is at or above ARM_TB_FW_CONFIG_LIMIT. The reserved
+ * page is required for the SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
+ */
+CASSERT(BL31_BASE >= ARM_TB_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
 
 /* Weak definitions may be overridden in specific ARM standard platform */
 #pragma weak bl31_early_platform_setup2
diff --git a/plat/arm/common/arm_dyn_cfg.c b/plat/arm/common/arm_dyn_cfg.c
index 3f0a9b4..32a515b 100644
--- a/plat/arm/common/arm_dyn_cfg.c
+++ b/plat/arm/common/arm_dyn_cfg.c
@@ -143,8 +143,8 @@
 			if (check_uptr_overflow(image_base, image_size) != 0)
 				continue;
 
-			/* Ensure the configs don't overlap with BL2 */
-			if ((image_base > BL2_BASE) || ((image_base + image_size) > BL2_BASE))
+			/* Ensure the configs don't overlap with BL31 */
+			if ((image_base > BL31_BASE) || ((image_base + image_size) > BL31_BASE))
 				continue;
 
 			/* Ensure the configs are loaded in a valid address */
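Since check_uptr_overflow() has already rejected wrap-around a few lines
earlier, the second clause of the new condition subsumes the first; a
simplified equivalent, shown only as a sketch rather than a suggested change:

    /* Skip any config image that would reach into BL31's address range. */
    if ((image_base + image_size) > BL31_BASE)
        continue;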
diff --git a/plat/arm/common/sp_min/arm_sp_min_setup.c b/plat/arm/common/sp_min/arm_sp_min_setup.c
index 9a6c074..b42e35f 100644
--- a/plat/arm/common/sp_min/arm_sp_min_setup.c
+++ b/plat/arm/common/sp_min/arm_sp_min_setup.c
@@ -22,6 +22,11 @@
 #pragma weak sp_min_plat_arch_setup
 #pragma weak plat_arm_sp_min_early_platform_setup
 
+/*
+ * Check that BL32_BASE is at or above ARM_TB_FW_CONFIG_LIMIT. The reserved
+ * page is required for the SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
+ */
+CASSERT(BL32_BASE >= ARM_TB_FW_CONFIG_LIMIT, assert_bl32_base_overflows);
 
 /*******************************************************************************
  * Return a pointer to the 'entry_point_info' structure of the next image for the
diff --git a/plat/arm/css/drivers/scp/css_bom_bootloader.c b/plat/arm/css/drivers/scp/css_bom_bootloader.c
index 42ed30d..5268510 100644
--- a/plat/arm/css/drivers/scp/css_bom_bootloader.c
+++ b/plat/arm/css/drivers/scp/css_bom_bootloader.c
@@ -47,16 +47,16 @@
 } cmd_data_payload_t;
 
 /*
- * All CSS platforms load SCP_BL2/SCP_BL2U just below BL rw-data and above
- * BL2/BL2U (this is where BL31 usually resides except when ARM_BL31_IN_DRAM is
- * set. Ensure that SCP_BL2/SCP_BL2U do not overflow into BL1 rw-data nor
- * BL2/BL2U.
+ * All CSS platforms load SCP_BL2/SCP_BL2U just below BL2 (which is where
+ * BL31 usually resides, except when ARM_BL31_IN_DRAM is set). Ensure that
+ * SCP_BL2/SCP_BL2U neither overwrite BL2 nor overflow downwards into the
+ * shared RAM that holds the tb_fw_config.
  */
-CASSERT(SCP_BL2_LIMIT <= BL1_RW_BASE, assert_scp_bl2_overwrite_bl1);
-CASSERT(SCP_BL2U_LIMIT <= BL1_RW_BASE, assert_scp_bl2u_overwrite_bl1);
+CASSERT(SCP_BL2_LIMIT <= BL2_BASE, assert_scp_bl2_overwrite_bl2);
+CASSERT(SCP_BL2U_LIMIT <= BL2_BASE, assert_scp_bl2u_overwrite_bl2);
 
-CASSERT(SCP_BL2_BASE >= BL2_LIMIT, assert_scp_bl2_overwrite_bl2);
-CASSERT(SCP_BL2U_BASE >= BL2U_LIMIT, assert_scp_bl2u_overwrite_bl2u);
+CASSERT(SCP_BL2_BASE >= ARM_TB_FW_CONFIG_LIMIT, assert_scp_bl2_overflow);
+CASSERT(SCP_BL2U_BASE >= ARM_TB_FW_CONFIG_LIMIT, assert_scp_bl2u_overflow);
 
 static void scp_boot_message_start(void)
 {
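Taken together, the reworked asserts pin down the following SRAM picture
(addresses grow upwards; this summary is inferred from the asserts in this
patch, not copied from the tree):

    /*
     * ARM_BL_RAM_BASE .. ARM_TB_FW_CONFIG_LIMIT : meminfo_t + fw_configs page
     * ARM_TB_FW_CONFIG_LIMIT .. BL2_BASE        : SCP_BL2 / SCP_BL2U window
     * BL2_BASE and up                           : BL2 (later overlaid by the
     *                                             BL31/BL32 NOBITS sections)
     */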
diff --git a/plat/arm/css/drivers/scp/css_scp.h b/plat/arm/css/drivers/scp/css_scp.h
index 1f0cf8e..671612a 100644
--- a/plat/arm/css/drivers/scp/css_scp.h
+++ b/plat/arm/css/drivers/scp/css_scp.h
@@ -34,17 +34,17 @@
 int css_scp_boot_ready(void);
 
 #if CSS_LOAD_SCP_IMAGES
+
 /*
- * All CSS platforms load SCP_BL2/SCP_BL2U just below BL rw-data and above
- * BL2/BL2U (this is where BL31 usually resides except when ARM_BL31_IN_DRAM is
- * set. Ensure that SCP_BL2/SCP_BL2U do not overflow into BL1 rw-data nor
- * BL2/BL2U.
+ * All CSS platforms load SCP_BL2/SCP_BL2U just below BL2 (which is where
+ * BL31 usually resides, except when ARM_BL31_IN_DRAM is set). Ensure that
+ * SCP_BL2/SCP_BL2U do not overflow downwards into the tb_fw_config.
  */
-CASSERT(SCP_BL2_LIMIT <= BL1_RW_BASE, assert_scp_bl2_limit_overwrite_bl1);
-CASSERT(SCP_BL2U_LIMIT <= BL1_RW_BASE, assert_scp_bl2u_limit_overwrite_bl1);
+CASSERT(SCP_BL2_LIMIT <= BL2_BASE, assert_scp_bl2_overwrite_bl2);
+CASSERT(SCP_BL2U_LIMIT <= BL2_BASE, assert_scp_bl2u_overwrite_bl2);
 
-CASSERT(SCP_BL2_BASE >= BL2_LIMIT, assert_scp_bl2_overwrite_bl2);
-CASSERT(SCP_BL2U_BASE >= BL2U_LIMIT, assert_scp_bl2u_overwrite_bl2u);
+CASSERT(SCP_BL2_BASE >= ARM_TB_FW_CONFIG_LIMIT, assert_scp_bl2_overflow);
+CASSERT(SCP_BL2U_BASE >= ARM_TB_FW_CONFIG_LIMIT, assert_scp_bl2u_overflow);
 #endif
 
 #endif	/* __CSS_SCP_H__ */
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
index eb736c0..45c4704 100644
--- a/services/arm_arch_svc/arm_arch_svc_setup.c
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -10,7 +10,8 @@
 #include <runtime_svc.h>
 #include <smccc.h>
 #include <smccc_helpers.h>
-#include <workaround_cve_2017_5715.h>
+#include <wa_cve_2017_5715.h>
+#include <wa_cve_2018_3639.h>
 
 static int32_t smccc_version(void)
 {
@@ -25,10 +26,31 @@
 		return SMC_OK;
 #if WORKAROUND_CVE_2017_5715
 	case SMCCC_ARCH_WORKAROUND_1:
-		if (check_workaround_cve_2017_5715() == ERRATA_NOT_APPLIES)
+		if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)
 			return 1;
 		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
 #endif
+#if WORKAROUND_CVE_2018_3639
+	case SMCCC_ARCH_WORKAROUND_2:
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+		/*
+		 * On a platform where at least one CPU requires
+		 * dynamic mitigation but others are either unaffected
+		 * or permanently mitigated, report the latter as not
+		 * needing dynamic mitigation.
+		 */
+		if (wa_cve_2018_3639_get_disable_ptr() == NULL)
+			return 1;
+		/*
+		 * If we get here, this CPU requires dynamic mitigation
+		 * so report it as such.
+		 */
+		return 0;
+#else
+		/* Either the CPUs are unaffected or permanently mitigated */
+		return SMCCC_ARCH_NOT_REQUIRED;
+#endif
+#endif
 	default:
 		return SMC_UNK;
 	}
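From a lower EL, the three outcomes above drive workaround discovery. A
caller-side sketch, assuming an smc(fid, a1, a2) helper and the SMCCC 1.1
function IDs (SMCCC_VERSION = 0x80000000, SMCCC_ARCH_FEATURES = 0x80000001,
SMCCC_ARCH_WORKAROUND_2 = 0x80007fff); none of this is part of the patch:

    #include <stdint.h>

    extern int32_t smc(uint32_t fid, uint64_t a1, uint64_t a2);	/* assumed helper */

    /* 1: firmware wants SMCCC_ARCH_WORKAROUND_2 around sensitive code on this
     * PE; 0: nothing to do (unaffected, statically mitigated, or pre-1.1). */
    static int needs_dynamic_ssb_mitigation(void)
    {
        if (smc(0x80000000U, 0, 0) < 0x10001)	/* SMCCC_VERSION below 1.1 */
            return 0;
        return smc(0x80000001U, 0x80007fffU, 0) == 0;	/* ARCH_FEATURES(WA_2) */
    }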
@@ -60,6 +82,16 @@
 		 */
 		SMC_RET0(handle);
 #endif
+#if WORKAROUND_CVE_2018_3639
+	case SMCCC_ARCH_WORKAROUND_2:
+		/*
+		 * The workaround has already been applied on affected PEs
+		 * requiring dynamic mitigation during entry to EL3.
+		 * On unaffected or statically mitigated PEs, this function
+		 * has no effect.
+		 */
+		SMC_RET0(handle);
+#endif
 	default:
 		WARN("Unimplemented Arm Architecture Service Call: 0x%x \n",
 			smc_fid);
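At runtime the fast-path handler above makes SMCCC_ARCH_WORKAROUND_2 cheap on
unaffected PEs: the mitigation toggle happens on entry to EL3 and the SMC
simply returns. A lower-EL usage sketch, again assuming the smc() helper and,
per SMCCC 1.1, arg1 = 1 to enable and 0 to disable the mitigation:

    #include <stdint.h>

    extern int32_t smc(uint32_t fid, uint64_t a1, uint64_t a2);	/* assumed helper */

    /* Run fn with the dynamic mitigation enabled around it. */
    static void with_ssb_mitigation(void (*fn)(void))
    {
        smc(0x80007fffU, 1, 0);	/* SMCCC_ARCH_WORKAROUND_2: enable */
        fn();
        smc(0x80007fffU, 0, 0);	/* SMCCC_ARCH_WORKAROUND_2: disable */
    }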