Merge pull request #1902 from jts-arm/romlib

ROMLIB bug fixes
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index aa9d007..c295176 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -419,6 +419,9 @@
 	 */
 	mov	x0, #SMC_UNK
 	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+#if CTX_INCLUDE_PAUTH_REGS
+	bl	pauth_context_save
+#endif
 	b	restore_gp_registers_eret
 
 smc_prohibited:
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index c88b1f6..5d6f4f8 100644
--- a/docs/user-guide.rst
+++ b/docs/user-guide.rst
@@ -742,6 +742,11 @@
    (Coherent memory region is included) or 0 (Coherent memory region is
    excluded). Default is 1.
 
+-  ``USE_ROMLIB``: This flag determines whether the library at ROM will be used.
+   This feature creates a library of functions to be placed in ROM and thus
+   reduces SRAM usage. Refer to `Library at ROM`_ for further details. Default
+   is 0.
+
 -  ``V``: Verbose build. If assigned anything other than 0, the build commands
    are printed. Default is 0.
 
@@ -2102,3 +2107,4 @@
 .. _PSCI: http://infocenter.arm.com/help/topic/com.arm.doc.den0022d/Power_State_Coordination_Interface_PDD_v1_1_DEN0022D.pdf
 .. _Secure Partition Manager Design guide: secure-partition-manager-design.rst
 .. _`Trusted Firmware-A Coding Guidelines`: coding-guidelines.rst
+.. _`Library at ROM`: romlib-design.rst
\ No newline at end of file
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index ac51343..e544018 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -13,20 +13,17 @@
 #include <plat_macros.S>
 #include <services/arm_arch_svc.h>
 
-#if !DYNAMIC_WORKAROUND_CVE_2018_3639
-#error Cortex A76 requires DYNAMIC_WORKAROUND_CVE_2018_3639=1
-#endif
-
 #define ESR_EL3_A64_SMC0	0x5e000000
 #define ESR_EL3_A32_SMC0	0x4e000000
 
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	/*
 	 * This macro applies the mitigation for CVE-2018-3639.
-	 * It implements a fash path where `SMCCC_ARCH_WORKAROUND_2`
+	 * It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
 	 * SMC calls from a lower EL running in AArch32 or AArch64
 	 * will go through the fast and return early.
 	 *
-	 * The macro saves x2-x3 to the context.  In the fast path
+	 * The macro saves x2-x3 to the context. In the fast path
 	 * x0-x3 registers do not need to be restored as the calling
 	 * context will have saved them.
 	 */
@@ -63,7 +60,7 @@
 		 * When the calling context wants mitigation disabled,
 		 * we program the mitigation disable function in the
 		 * CPU context, which gets invoked on subsequent exits from
-		 * EL3 via the `el3_exit` function.  Otherwise NULL is
+		 * EL3 via the `el3_exit` function. Otherwise NULL is
 		 * programmed in the CPU context, which results in caller's
 		 * inheriting the EL3 mitigation state (enabled) on subsequent
 		 * `el3_exit`.
@@ -82,7 +79,7 @@
 	.endif
 1:
 	/*
-	 * Always enable v4 mitigation during EL3 execution.  This is not
+	 * Always enable v4 mitigation during EL3 execution. This is not
 	 * required for the fast path above because it does not perform any
 	 * memory loads.
 	 */
@@ -188,6 +185,7 @@
 	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
 	b	serror_aarch32
 end_vector_entry cortex_a76_serror_aarch32
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
 
 	/* --------------------------------------------------
 	 * Errata Workaround for Cortex A76 Errata #1073348.
@@ -319,9 +317,13 @@
 	/* If the PE implements SSBS, we don't need the dynamic workaround */
 	mrs	x0, id_aa64pfr1_el1
 	lsr	x0, x0, #ID_AA64PFR1_EL1_SSBS_SHIFT
-	and     x0, x0, #ID_AA64PFR1_EL1_SSBS_MASK
+	and	x0, x0, #ID_AA64PFR1_EL1_SSBS_MASK
+#if !DYNAMIC_WORKAROUND_CVE_2018_3639 && ENABLE_ASSERTIONS
+	cmp	x0, 0
+	ASM_ASSERT(ne)
+#endif
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
 	cbnz	x0, 1f
-
 	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
 	orr	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
 	msr	CORTEX_A76_CPUACTLR2_EL1, x0
@@ -330,16 +332,17 @@
 #ifdef IMAGE_BL31
 	/*
 	 * The Cortex-A76 generic vectors are overwritten to use the vectors
-	 * defined above.  This is required in order to apply mitigation
+	 * defined above. This is required in order to apply mitigation
 	 * against CVE-2018-3639 on exception entry from lower ELs.
 	 */
 	adr	x0, cortex_a76_wa_cve_2018_3639_a76_vbar
 	msr	vbar_el3, x0
 	isb
-#endif
+#endif /* IMAGE_BL31 */
 
 1:
-#endif
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+#endif /* WORKAROUND_CVE_2018_3639 */
 
 #if ERRATA_DSU_936184
 	bl	errata_dsu_936184_wa