Merge pull request #1382 from sandrine-bailleux-arm/topics/sb/fix-doc

Fix doc for bl31_plat_get_next_image_ep_info()
diff --git a/Makefile b/Makefile
index 1708a4c..b7116a7 100644
--- a/Makefile
+++ b/Makefile
@@ -387,6 +387,20 @@
     endif
 endif
 
+# For RAS_EXTENSION, require that EAs are handled in EL3 first
+ifeq ($(RAS_EXTENSION),1)
+    ifneq ($(HANDLE_EA_EL3_FIRST),1)
+        $(error For RAS_EXTENSION, HANDLE_EA_EL3_FIRST must also be 1)
+    endif
+endif
+
+# When FAULT_INJECTION_SUPPORT is used, require that RAS_EXTENSION is enabled
+ifeq ($(FAULT_INJECTION_SUPPORT),1)
+    ifneq ($(RAS_EXTENSION),1)
+        $(error For FAULT_INJECTION_SUPPORT, RAS_EXTENSION must also be 1)
+    endif
+endif
+
 ################################################################################
 # Process platform overrideable behaviour
 ################################################################################
@@ -514,8 +528,10 @@
 $(eval $(call assert_boolean,ENABLE_SPM))
 $(eval $(call assert_boolean,ENABLE_SVE_FOR_NS))
 $(eval $(call assert_boolean,ERROR_DEPRECATED))
+$(eval $(call assert_boolean,FAULT_INJECTION_SUPPORT))
 $(eval $(call assert_boolean,GENERATE_COT))
 $(eval $(call assert_boolean,GICV2_G0_FOR_EL3))
+$(eval $(call assert_boolean,HANDLE_EA_EL3_FIRST))
 $(eval $(call assert_boolean,HW_ASSISTED_COHERENCY))
 $(eval $(call assert_boolean,LOAD_IMAGE_V2))
 $(eval $(call assert_boolean,MULTI_CONSOLE_API))
@@ -523,6 +539,7 @@
 $(eval $(call assert_boolean,PL011_GENERIC_UART))
 $(eval $(call assert_boolean,PROGRAMMABLE_RESET_ADDRESS))
 $(eval $(call assert_boolean,PSCI_EXTENDED_STATE_ID))
+$(eval $(call assert_boolean,RAS_EXTENSION))
 $(eval $(call assert_boolean,RESET_TO_BL31))
 $(eval $(call assert_boolean,SAVE_KEYS))
 $(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA))
@@ -561,7 +578,9 @@
 $(eval $(call add_define,ENABLE_SPM))
 $(eval $(call add_define,ENABLE_SVE_FOR_NS))
 $(eval $(call add_define,ERROR_DEPRECATED))
+$(eval $(call add_define,FAULT_INJECTION_SUPPORT))
 $(eval $(call add_define,GICV2_G0_FOR_EL3))
+$(eval $(call add_define,HANDLE_EA_EL3_FIRST))
 $(eval $(call add_define,HW_ASSISTED_COHERENCY))
 $(eval $(call add_define,LOAD_IMAGE_V2))
 $(eval $(call add_define,LOG_LEVEL))
@@ -571,6 +590,7 @@
 $(eval $(call add_define,PLAT_${PLAT}))
 $(eval $(call add_define,PROGRAMMABLE_RESET_ADDRESS))
 $(eval $(call add_define,PSCI_EXTENDED_STATE_ID))
+$(eval $(call add_define,RAS_EXTENSION))
 $(eval $(call add_define,RESET_TO_BL31))
 $(eval $(call add_define,SEPARATE_CODE_AND_RODATA))
 $(eval $(call add_define,SMCCC_MAJOR_VERSION))
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 1c3ed3f..346cd3b 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -8,6 +8,7 @@
 #include <asm_macros.S>
 #include <context.h>
 #include <cpu_data.h>
+#include <ea_handle.h>
 #include <interrupt_mgmt.h>
 #include <platform_def.h>
 #include <runtime_svc.h>
@@ -35,17 +36,77 @@
 	.globl	fiq_aarch32
 	.globl	serror_aarch32
 
+	/*
+	 * Macro that prepares entry to EL3 upon taking an exception.
+	 *
+	 * With RAS_EXTENSION, this macro synchronizes pending errors with an ESB
+	 * instruction. When an error is thus synchronized, the handling is
+	 * delegated to the platform EA handler.
+	 *
+	 * Without RAS_EXTENSION, this macro just saves x30, and unmasks
+	 * Asynchronous External Aborts.
+	 */
+	.macro check_and_unmask_ea
+#if RAS_EXTENSION
+	/* Synchronize pending External Aborts */
+	esb
+
+	/* Unmask the SError interrupt */
+	msr	daifclr, #DAIF_ABT_BIT
+
+	/*
+	 * Explicitly save x30 so as to free up a register and to enable
+	 * branching
+	 */
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+	/* Check for SErrors synchronized by the ESB instruction */
+	mrs	x30, DISR_EL1
+	tbz	x30, #DISR_A_BIT, 1f
+
+	/* Save GP registers and restore them afterwards */
+	bl	save_gp_registers
+	mov	x0, #ERROR_EA_ESB
+	mrs	x1, DISR_EL1
+	bl	delegate_ea
+	bl	restore_gp_registers
+
+1:
+#else
+	/* Unmask the SError interrupt */
+	msr	daifclr, #DAIF_ABT_BIT
+
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+#endif
+	.endm
+
+	/*
+	 * Handle External Abort by delegating to the platform's EA handler.
+	 * Once the platform handler returns, the macro exits EL3 and returns to
+	 * where the abort was taken from.
+	 *
+	 * This macro assumes that x30 is available for use.
+	 *
+	 * 'abort_type' is a constant passed to the platform handler, indicating
+	 * the cause of the External Abort.
+	 */
+	.macro handle_ea abort_type
+	/* Save GP registers */
+	bl	save_gp_registers
+
+	/* Setup exception class and syndrome arguments for platform handler */
+	mov	x0, \abort_type
+	mrs	x1, esr_el3
+	adr	x30, el3_exit
+	b	delegate_ea
+	.endm
+
 	/* ---------------------------------------------------------------------
 	 * This macro handles Synchronous exceptions.
 	 * Only SMC exceptions are supported.
 	 * ---------------------------------------------------------------------
 	 */
 	.macro	handle_sync_exception
-	/* Enable the SError interrupt */
-	msr	daifclr, #DAIF_ABT_BIT
-
-	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-
 #if ENABLE_RUNTIME_INSTRUMENTATION
 	/*
 	 * Read the timestamp value and store it in per-cpu data. The value
@@ -69,6 +130,20 @@
 	cmp	x30, #EC_AARCH64_SMC
 	b.eq	smc_handler64
 
+	/* Check for I/D aborts from lower EL */
+	cmp	x30, #EC_IABORT_LOWER_EL
+	b.eq	1f
+
+	cmp	x30, #EC_DABORT_LOWER_EL
+	b.ne	2f
+
+1:
+	/* Test for EA bit in the instruction syndrome */
+	mrs	x30, esr_el3
+	tbz	x30, #ESR_ISS_EABORT_EA_BIT, 2f
+	handle_ea #ERROR_EA_SYNC
+
+2:
 	/* Other kinds of synchronous exceptions are not handled */
 	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 	b	report_unhandled_exception
@@ -81,12 +156,7 @@
 	 * ---------------------------------------------------------------------
 	 */
 	.macro	handle_interrupt_exception label
-	/* Enable the SError interrupt */
-	msr	daifclr, #DAIF_ABT_BIT
-
-	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 	bl	save_gp_registers
-
 	/* Save the EL3 system registers needed to return from this exception */
 	mrs	x0, spsr_el3
 	mrs	x1, elr_el3
@@ -154,25 +224,6 @@
 	.endm
 
 
-	.macro save_x4_to_x29_sp_el0
-	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
-	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
-	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
-	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
-	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
-	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
-	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
-	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
-	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
-	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
-	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-	mrs	x18, sp_el0
-	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
-	.endm
-
-
 vector_base runtime_exceptions
 
 	/* ---------------------------------------------------------------------
@@ -239,23 +290,29 @@
 	 * to a valid cpu context where the general purpose and system register
 	 * state can be saved.
 	 */
+	check_and_unmask_ea
 	handle_sync_exception
 	check_vector_size sync_exception_aarch64
 
 vector_entry irq_aarch64
+	check_and_unmask_ea
 	handle_interrupt_exception irq_aarch64
 	check_vector_size irq_aarch64
 
 vector_entry fiq_aarch64
+	check_and_unmask_ea
 	handle_interrupt_exception fiq_aarch64
 	check_vector_size fiq_aarch64
 
 vector_entry serror_aarch64
+	msr	daifclr, #DAIF_ABT_BIT
+
 	/*
-	 * SError exceptions from lower ELs are not currently supported.
-	 * Report their occurrence.
+	 * Explicitly save x30 so as to free up a register and to enable
+	 * branching
 	 */
-	b	report_unhandled_exception
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	handle_ea #ERROR_EA_ASYNC
 	check_vector_size serror_aarch64
 
 	/* ---------------------------------------------------------------------
@@ -269,23 +326,29 @@
 	 * to a valid cpu context where the general purpose and system register
 	 * state can be saved.
 	 */
+	check_and_unmask_ea
 	handle_sync_exception
 	check_vector_size sync_exception_aarch32
 
 vector_entry irq_aarch32
+	check_and_unmask_ea
 	handle_interrupt_exception irq_aarch32
 	check_vector_size irq_aarch32
 
 vector_entry fiq_aarch32
+	check_and_unmask_ea
 	handle_interrupt_exception fiq_aarch32
 	check_vector_size fiq_aarch32
 
 vector_entry serror_aarch32
+	msr	daifclr, #DAIF_ABT_BIT
+
 	/*
-	 * SError exceptions from lower ELs are not currently supported.
-	 * Report their occurrence.
+	 * Explicitly save x30 so as to free up a register and to enable
+	 * branching
 	 */
-	b	report_unhandled_exception
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	handle_ea #ERROR_EA_ASYNC
 	check_vector_size serror_aarch32
 
 
@@ -345,7 +408,21 @@
 	 *
 	 * Save x4-x29 and sp_el0.
 	 */
-	save_x4_to_x29_sp_el0
+	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+	mrs	x18, sp_el0
+	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
 
 	mov	x5, xzr
 	mov	x6, sp
@@ -431,14 +508,12 @@
 
 smc_unknown:
 	/*
-	 * Here we restore x4-x18 regardless of where we came from. AArch32
-	 * callers will find the registers contents unchanged, but AArch64
-	 * callers will find the registers modified (with stale earlier NS
-	 * content). Either way, we aren't leaking any secure information
-	 * through them.
+	 * Unknown SMC call. Populate return value with SMC_UNK, restore
+	 * GP registers, and return to caller.
 	 */
 	mov	x0, #SMC_UNK
-	b	restore_gp_registers_callee_eret
+	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	b	restore_gp_registers_eret
 
 smc_prohibited:
 	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
@@ -450,3 +525,62 @@
 	msr	spsel, #1
 	no_ret	report_unhandled_exception
 endfunc smc_handler
+
+/*
+ * Delegate External Abort handling to the platform's EA handler. This function
+ * assumes that all GP registers have been saved by the caller.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ */
+func delegate_ea
+	/* Save EL3 state */
+	mrs	x2, spsr_el3
+	mrs	x3, elr_el3
+	stp	x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+
+	/*
+	 * Save ESR as handling might involve lower ELs, and returning back to
+	 * EL3 from there would trample the original ESR.
+	 */
+	mrs	x4, scr_el3
+	mrs	x5, esr_el3
+	stp	x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+
+	/*
+	 * Setup rest of arguments, and call platform External Abort handler.
+	 *
+	 * x0: EA reason (already in place)
+	 * x1: Exception syndrome (already in place).
+	 * x2: Cookie (unused for now).
+	 * x3: Context pointer.
+	 * x4: Flags (security state from SCR for now).
+	 */
+	mov	x2, xzr
+	mov	x3, sp
+	ubfx	x4, x4, #0, #1
+
+	/* Switch to runtime stack */
+	ldr	x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+	msr	spsel, #0
+	mov	sp, x5
+
+	mov	x29, x30
+	bl	plat_ea_handler
+	mov	x30, x29
+
+	/* Make SP point to context */
+	msr	spsel, #1
+
+	/* Restore EL3 state */
+	ldp	x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+	msr	spsr_el3, x1
+	msr	elr_el3, x2
+
+	/* Restore ESR_EL3 and SCR_EL3 */
+	ldp	x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+	msr	scr_el3, x3
+	msr	esr_el3, x4
+
+	ret
+endfunc delegate_ea
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index aed54a6..069ad11 100644
--- a/docs/user-guide.rst
+++ b/docs/user-guide.rst
@@ -390,6 +390,14 @@
    handled at EL3, and a panic will result. This is supported only for AArch64
    builds.
 
+-  ``FAULT_INJECTION_SUPPORT``: ARMv8.4 extensions introduced support for fault
+   injection from lower ELs, and this build option enables lower ELs to use
+   Error Records accessed via System Registers to inject faults. This is
+   applicable only to AArch64 builds.
+
+   This feature is intended for testing purposes only, and it is advisable to
+   keep it disabled for production images.
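+
+   Internally, enabling this option causes Trusted Firmware to set the
+   ``SCR_EL3.FIEN`` bit (as done in ``context_mgmt.c`` in this patch), which
+   permits lower ELs to access the Error Record registers used for fault
+   injection.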
+
 -  ``FIP_NAME``: This is an optional build option which specifies the FIP
    filename for the ``fip`` target. Default is ``fip.bin``.
 
@@ -531,6 +539,15 @@
    smc function id. When this option is enabled on Arm platforms, the
    option ``ARM_RECOM_STATE_ID_ENC`` needs to be set to 1 as well.
 
+-  ``RAS_EXTENSION``: When set to ``1``, enable Armv8.2 RAS features. RAS features
+   are an optional extension for pre-Armv8.2 CPUs, but are mandatory for Armv8.2
+   or later CPUs.
+
+   When ``RAS_EXTENSION`` is set to ``1``, ``HANDLE_EA_EL3_FIRST`` must also be
+   set to ``1``.
+
+   This option is disabled by default.
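+
+   For example, an illustrative FVP build with RAS handling enabled might be
+   invoked as follows (the platform and the other options are examples only):
+
+   ::
+
+       make PLAT=fvp RAS_EXTENSION=1 HANDLE_EA_EL3_FIRST=1 all fip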
+
 -  ``RESET_TO_BL31``: Enable BL31 entrypoint as the CPU reset vector instead
    of the BL1 entrypoint. It can take the value 0 (CPU reset to BL1
    entrypoint) or 1 (CPU reset to BL31 entrypoint).
@@ -590,7 +607,7 @@
    firmware images have been loaded in memory, and the MMU and caches are
    turned off. Refer to the "Debugging options" section for more details.
 
-- ``SP_MIN_WITH_SECURE_FIQ``: Boolean flag to indicate the SP_MIN handles
+-  ``SP_MIN_WITH_SECURE_FIQ``: Boolean flag to indicate the SP_MIN handles
    secure interrupts (caught through the FIQ line). Platforms can enable
    this directive if they need to handle such interruption. When enabled,
    the FIQ are handled in monitor mode and non secure world is not allowed
@@ -678,6 +695,15 @@
    Trusted Watchdog may be disabled at build time for testing or development
    purposes.
 
+-  ``ARM_LINUX_KERNEL_AS_BL33``: The Linux kernel expects registers x0-x3 to
+   have specific values at boot. This boolean option allows the Trusted Firmware
+   to have a Linux kernel image as BL33 by preparing the registers to these
+   values before jumping to BL33. This option defaults to 0 (disabled). For now,
+   it only supports AArch64 kernels. ``RESET_TO_BL31`` must be 1 when using it.
+   If this option is set to 1, ``ARM_PRELOADED_DTB_BASE`` must be set to the
+   location of a device tree blob (DTB) already loaded in memory. The Linux
+   Image address must be specified using the ``PRELOADED_BL33_BASE`` option.
+
 -  ``ARM_RECOM_STATE_ID_ENC``: The PSCI1.0 specification recommends an encoding
    for the construction of composite state-ID in the power-state parameter.
    The existing PSCI clients currently do not support this encoding of
@@ -1475,41 +1501,92 @@
 
     make PRELOADED_BL33_BASE=0x80000000 PLAT=fvp all fip
 
-Boot of a preloaded bootwrapped kernel image on Base FVP
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Boot of a preloaded kernel image on Base FVP
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The following example uses the AArch64 boot wrapper. This simplifies normal
-world booting while also making use of TF-A features. It can be obtained from
-its repository with:
+The following example uses a simplified boot flow, jumping directly from TF-A
+to the Linux kernel, which will use a ramdisk as its filesystem. This can be
+useful if both the kernel and the device tree blob (DTB) are already present in
+memory (like in FVP).
+
+For example, if the kernel is loaded at ``0x80080000`` and the DTB is loaded at
+address ``0x82000000``, the firmware can be built like this:
 
 ::
 
-    git clone git://git.kernel.org/pub/scm/linux/kernel/git/mark/boot-wrapper-aarch64.git
+    CROSS_COMPILE=aarch64-linux-gnu-  \
+    make PLAT=fvp DEBUG=1             \
+    RESET_TO_BL31=1                   \
+    ARM_LINUX_KERNEL_AS_BL33=1        \
+    PRELOADED_BL33_BASE=0x80080000    \
+    ARM_PRELOADED_DTB_BASE=0x82000000 \
+    all fip
 
-After compiling it, an ELF file is generated. It can be loaded with the
-following command:
+Now, the DTB has to be modified so that the kernel knows the address of the
+ramdisk. The following script generates a patched DTB from the provided one,
+assuming that the ramdisk is loaded at address ``0x84000000``. Note that this
+script assumes that the user is using a ramdisk image prepared for U-Boot, like
+the ones provided by Linaro. If using a ramdisk without this header, the ``0x40``
+offset in ``INITRD_START`` has to be removed.
 
-::
+.. code:: bash
 
-    <path-to>/FVP_Base_AEMv8A-AEMv8A              \
-        -C bp.secureflashloader.fname=bl1.bin     \
-        -C bp.flashloader0.fname=fip.bin          \
-        -a cluster0.cpu0=<bootwrapped-kernel.elf> \
-        --start cluster0.cpu0=0x0
+    #!/bin/bash
 
-The ``-a cluster0.cpu0=<bootwrapped-kernel.elf>`` option loads the ELF file. It
-also sets the PC register to the ELF entry point address, which is not the
-desired behaviour, so the ``--start cluster0.cpu0=0x0`` option forces the PC back
-to 0x0 (the BL1 entry point address) on CPU #0. The ``PRELOADED_BL33_BASE`` define
-used when compiling the FIP must match the ELF entry point.
+    # Path to the input DTB
+    KERNEL_DTB=<path-to>/<fdt>
+    # Path to the output DTB
+    PATCHED_KERNEL_DTB=<path-to>/<patched-fdt>
+    # Base address of the ramdisk
+    INITRD_BASE=0x84000000
+    # Path to the ramdisk
+    INITRD=<path-to>/<ramdisk.img>
 
-Boot of a preloaded bootwrapped kernel image on Juno
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    # Skip uboot header (64 bytes)
+    INITRD_START=$(printf "0x%x" $((${INITRD_BASE} + 0x40)) )
+    INITRD_SIZE=$(stat -Lc %s ${INITRD})
+    INITRD_END=$(printf "0x%x" $((${INITRD_BASE} + ${INITRD_SIZE})) )
+
+    CHOSEN_NODE=$(echo                                        \
+    "/ {                                                      \
+            chosen {                                          \
+                    linux,initrd-start = <${INITRD_START}>;   \
+                    linux,initrd-end = <${INITRD_END}>;       \
+            };                                                \
+    };")
+
+    echo $(dtc -O dts -I dtb ${KERNEL_DTB}) ${CHOSEN_NODE} |  \
+            dtc -O dtb -o ${PATCHED_KERNEL_DTB} -
+
+The FVP can then be launched with the following command:
+
+::
+
+    <path-to>/FVP_Base_AEMv8A-AEMv8A                            \
+    -C pctl.startup=0.0.0.0                                     \
+    -C bp.secure_memory=1                                       \
+    -C cluster0.NUM_CORES=4                                     \
+    -C cluster1.NUM_CORES=4                                     \
+    -C cache_state_modelled=1                                   \
+    -C cluster0.cpu0.RVBAR=0x04020000                           \
+    -C cluster0.cpu1.RVBAR=0x04020000                           \
+    -C cluster0.cpu2.RVBAR=0x04020000                           \
+    -C cluster0.cpu3.RVBAR=0x04020000                           \
+    -C cluster1.cpu0.RVBAR=0x04020000                           \
+    -C cluster1.cpu1.RVBAR=0x04020000                           \
+    -C cluster1.cpu2.RVBAR=0x04020000                           \
+    -C cluster1.cpu3.RVBAR=0x04020000                           \
+    --data cluster0.cpu0="<path-to>/bl31.bin"@0x04020000        \
+    --data cluster0.cpu0="<path-to>/<patched-fdt>"@0x82000000   \
+    --data cluster0.cpu0="<path-to>/<kernel-binary>"@0x80080000 \
+    --data cluster0.cpu0="<path-to>/<ramdisk.img>"@0x84000000
+
+Boot of a preloaded kernel image on Juno
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The procedure to obtain and compile the boot wrapper is very similar to the case
-of the FVP. The execution must be stopped at the end of bl2\_main(), and the
-loading method explained above in the EL3 payload boot flow section may be used
-to load the ELF file over JTAG on Juno.
+The Trusted Firmware must be compiled in a similar way to the FVP case explained
+above. The process to load binaries to memory is the one explained in
+`Booting an EL3 payload on Juno`_.
 
 Running the software on FVP
 ---------------------------
diff --git a/drivers/console/aarch64/multi_console.S b/drivers/console/aarch64/multi_console.S
index 15c3ba4..a85a6a5 100644
--- a/drivers/console/aarch64/multi_console.S
+++ b/drivers/console/aarch64/multi_console.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,7 +10,8 @@
 
 	.globl	console_register
 	.globl	console_unregister
-	.globl  console_set_scope
+	.globl	console_is_registered
+	.globl	console_set_scope
 	.globl	console_switch_state
 	.globl	console_putc
 	.globl	console_getc
@@ -38,13 +39,15 @@
 	 * persistent memory (e.g. the data section).
 	 * In : x0 - address of console_t structure
 	 * Out: x0 - Always 1 (for easier tail calling)
-	 * Clobber list: x0, x1, x14
+	 * Clobber list: x0, x1, x14, x15
 	 * -----------------------------------------------
 	 */
 func console_register
 #if ENABLE_ASSERTIONS
+	/* Assert that x0 isn't a NULL pointer */
 	cmp	x0, #0
 	ASM_ASSERT(ne)
+	/* Assert that the struct isn't on the stack */
 	adrp	x1, __STACKS_START__
 	add	x1, x1, :lo12:__STACKS_START__
 	cmp	x0, x1
@@ -54,6 +57,14 @@
 	cmp	x0, x1
 	ASM_ASSERT(hs)
 not_on_stack:
+	/* Assert that this struct isn't in the list */
+	mov	x1, x0 /* Preserve x0 and x30 */
+	mov	x15, x30
+	bl	console_is_registered
+	cmp	x0, #0
+	ASM_ASSERT(eq)
+	mov	x30, x15
+	mov	x0, x1
 #endif /* ENABLE_ASSERTIONS */
 	adrp	x14, console_list
 	ldr	x1, [x14, :lo12:console_list]	/* X1 = first struct in list */
@@ -73,6 +84,11 @@
 	 * -----------------------------------------------
 	 */
 func console_unregister
+#if ENABLE_ASSERTIONS
+	/* Assert that x0 isn't a NULL pointer */
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
 	adrp	x14, console_list
 	add	x14, x14, :lo12:console_list	/* X14 = ptr to first struct */
 	ldr	x1, [x14]			/* X1 = first struct */
@@ -96,6 +112,37 @@
 endfunc console_unregister
 
 	/* -----------------------------------------------
+	 * int console_is_registered(console_t *console)
+	 * Function to detect if a specific console is
+	 * registered or not.
+	 * In: x0 - address of console_t struct to check
+	 * Out: x0 - 1 if it is registered, 0 if not.
+	 * Clobber list: x0, x14
+	 * -----------------------------------------------
+	 */
+func console_is_registered
+#if ENABLE_ASSERTIONS
+	/* Assert that x0 isn't a NULL pointer */
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
+	adrp	x14, console_list
+	ldr	x14, [x14, :lo12:console_list]	/* X14 = first console struct */
+check_registered_loop:
+	cbz	x14, console_not_registered /* Check if end of list */
+	cmp	x0, x14		/* Check if the pointers are different */
+	b.eq	console_registered
+	ldr	x14, [x14, #CONSOLE_T_NEXT]	/* Get pointer to next struct */
+	b	check_registered_loop
+console_not_registered:
+	mov	x0, #0
+	ret
+console_registered:
+	mov	x0, #1
+	ret
+endfunc console_is_registered
+
+	/* -----------------------------------------------
 	 * void console_switch_state(unsigned int new_state)
 	 * Function to switch the current console state.
 	 * The console state determines which of the
diff --git a/include/bl31/ea_handle.h b/include/bl31/ea_handle.h
new file mode 100644
index 0000000..060c9b7
--- /dev/null
+++ b/include/bl31/ea_handle.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __EA_HANDLE_H__
+#define __EA_HANDLE_H__
+
+/* Constants indicating the reason for an External Abort */
+
+/* External Abort received at SError vector */
+#define ERROR_EA_ASYNC		0
+
+/* Synchronous External Abort received at Synchronous exception vector */
+#define ERROR_EA_SYNC		1
+
+/* External Abort synchronized by ESB instruction */
+#define ERROR_EA_ESB		2
+
+/* RAS event signalled as peripheral interrupt */
+#define ERROR_INTERRUPT		3
+
+#endif /* __EA_HANDLE_H__ */
diff --git a/include/common/aarch64/asm_macros.S b/include/common/aarch64/asm_macros.S
index 94a9df9..7c8e643 100644
--- a/include/common/aarch64/asm_macros.S
+++ b/include/common/aarch64/asm_macros.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -192,4 +192,10 @@
 	.space	SPINLOCK_ASM_SIZE
 	.endm
 
+#if RAS_EXTENSION
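+	/*
+	 * ESB (Error Synchronization Barrier), encoded directly, presumably so
+	 * that this assembles even with toolchains that do not understand the
+	 * RAS extension mnemonic.
+	 */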
+	.macro esb
+	.inst	0xd503221f
+	.endm
+#endif
+
 #endif /* __ASM_MACROS_S__ */
diff --git a/include/drivers/console.h b/include/drivers/console.h
index f8ec83d..0855170 100644
--- a/include/drivers/console.h
+++ b/include/drivers/console.h
@@ -50,7 +50,12 @@
  */
 /* Remove a single console_t instance from the console list. */
 int console_unregister(console_t *console);
-/* Set scope mask of a console that determines in what states it is active. */
+/* Returns 1 if this console is already registered, 0 if not */
+int console_is_registered(console_t *console);
+/*
+ * Set scope mask of a console that determines in what states it is active.
+ * By default, consoles are registered with (CONSOLE_FLAG_BOOT|CONSOLE_FLAG_CRASH).
+ */
 void console_set_scope(console_t *console, unsigned int scope);
 
 /* Switch to a new global console state (CONSOLE_FLAG_BOOT/RUNTIME/CRASH). */
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index ff3881e..92bb97d 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -215,6 +215,7 @@
 
 /* SCR definitions */
 #define SCR_RES1_BITS		((U(1) << 4) | (U(1) << 5))
+#define SCR_FIEN_BIT		(U(1) << 21)
 #define SCR_TWE_BIT		(U(1) << 13)
 #define SCR_TWI_BIT		(U(1) << 12)
 #define SCR_ST_BIT		(U(1) << 11)
@@ -528,6 +529,12 @@
 #define EC_AARCH64_FP			U(0x2c)
 #define EC_SERROR			U(0x2f)
 
+/*
+ * External Abort bit in Instruction and Data Aborts synchronous exception
+ * syndromes.
+ */
+#define ESR_ISS_EABORT_EA_BIT		U(9)
+
 #define EC_BITS(x)			(((x) >> ESR_EC_SHIFT) & ESR_EC_MASK)
 
 /* Reset bit inside the Reset management register for EL3 (RMR_EL3) */
@@ -705,4 +712,23 @@
 #define AMCGCR_EL0_CG1NC_LENGTH	U(8)
 #define AMCGCR_EL0_CG1NC_MASK	U(0xff)
 
+/*******************************************************************************
+ * RAS system registers
+ *******************************************************************************/
+#define DISR_EL1		S3_0_C12_C1_1
+#define DISR_A_BIT		31
+
+#define ERRIDR_EL1		S3_0_C5_C3_0
+#define ERRIDR_MASK		0xffff
+
+#define ERRSELR_EL1		S3_0_C5_C3_1
+
+/* System register access to Standard Error Record registers */
+#define ERXFR_EL1		S3_0_C5_C4_0
+#define ERXCTLR_EL1		S3_0_C5_C4_1
+#define ERXSTATUS_EL1		S3_0_C5_C4_2
+#define ERXADDR_EL1		S3_0_C5_C4_3
+#define ERXMISC0_EL1		S3_0_C5_C4_4
+#define ERXMISC1_EL1		S3_0_C5_C4_5
+
 #endif /* __ARCH_H__ */
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index c346f79..58ec943 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -333,6 +333,16 @@
 DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
 DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el2, ZCR_EL2)
 
+DEFINE_RENAME_SYSREG_READ_FUNC(erridr_el1, ERRIDR_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(errselr_el1, ERRSELR_EL1)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(erxfr_el1, ERXFR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxctlr_el1, ERXCTLR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxstatus_el1, ERXSTATUS_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxaddr_el1, ERXADDR_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc0_el1, ERXMISC0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc1_el1, ERXMISC1_EL1)
+
 #define IS_IN_EL(x) \
 	(GET_EL(read_CurrentEl()) == MODE_EL##x)
 
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index 5f6bdc9..cdd74a3 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -7,6 +7,8 @@
 #ifndef __CONTEXT_H__
 #define __CONTEXT_H__
 
+#include <utils_def.h>
+
 /*******************************************************************************
  * Constants that allow assembler code to access members of and the 'gp_regs'
  * structure at their correct offsets.
@@ -53,10 +55,12 @@
  ******************************************************************************/
 #define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
 #define CTX_SCR_EL3		U(0x0)
-#define CTX_RUNTIME_SP		U(0x8)
-#define CTX_SPSR_EL3		U(0x10)
-#define CTX_ELR_EL3		U(0x18)
-#define CTX_EL3STATE_END	U(0x20)
+#define CTX_ESR_EL3		U(0x8)
+#define CTX_RUNTIME_SP		U(0x10)
+#define CTX_SPSR_EL3		U(0x18)
+#define CTX_ELR_EL3		U(0x20)
+#define CTX_UNUSED		U(0x28)
+#define CTX_EL3STATE_END	U(0x30)
 
 /*******************************************************************************
  * Constants that allow assembler code to access members of and the
diff --git a/include/lib/extensions/ras.h b/include/lib/extensions/ras.h
new file mode 100644
index 0000000..f57fc3a
--- /dev/null
+++ b/include/lib/extensions/ras.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __RAS_COMMON__
+#define __RAS_COMMON__
+
+#define ERR_HANDLER_VERSION	1
+
+/* Error record access mechanism */
+#define ERR_ACCESS_SYSREG	0
+#define ERR_ACCESS_MEMMAP	1
+
+/*
+ * Register all error records on the platform.
+ *
+ * This macro must be used in the same file as the array of error record info
+ * is declared. Only then would ARRAY_SIZE() yield a meaningful value.
+ */
+#define REGISTER_ERR_RECORD_INFO(_records) \
+	const struct err_record_mapping err_record_mapping = { \
+		.err_records = _records, \
+		.num_err_records = ARRAY_SIZE(_records), \
+	}
+
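+/*
+ * Illustrative usage (a minimal sketch; 'plat_err_records' and
+ * 'plat_err_handler' are hypothetical platform symbols):
+ *
+ *   struct err_record_info plat_err_records[] = {
+ *       ERR_RECORD_SYSREG_V1(0, 2, ras_err_ser_probe_sysreg,
+ *                            plat_err_handler, NULL),
+ *   };
+ *   REGISTER_ERR_RECORD_INFO(plat_err_records);
+ */
+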
+/* Error record info iterator */
+#define for_each_err_record_info(_i, _info) \
+	for (_i = 0, _info = err_record_mapping.err_records; \
+		_i < err_record_mapping.num_err_records; \
+		_i++, _info++)
+
+#define _ERR_RECORD_COMMON(_probe, _handler, _aux) \
+	.probe = _probe, \
+	.handler = _handler, \
+	.aux_data = _aux,
+
+#define ERR_RECORD_SYSREG_V1(_idx_start, _num_idx, _probe, _handler, _aux) \
+	{ \
+		.version = 1, \
+		.sysreg.idx_start = _idx_start, \
+		.sysreg.num_idx = _num_idx, \
+		.access = ERR_ACCESS_SYSREG, \
+		_ERR_RECORD_COMMON(_probe, _handler, _aux) \
+	}
+
+#define ERR_RECORD_MEMMAP_V1(_base_addr, _size_num_k, _probe, _handler, _aux) \
+	{ \
+		.version = 1, \
+		.memmap.base_addr = _base_addr, \
+		.memmap.size_num_k = _size_num_k, \
+		.access = ERR_ACCESS_MEMMAP, \
+		_ERR_RECORD_COMMON(_probe, _handler, _aux) \
+	}
+
+/*
+ * Macro to be used to name and declare an array of RAS interrupts along with
+ * their handlers.
+ *
+ * This macro must be used in the same file as the array of interrupts is
+ * declared. Only then would ARRAY_SIZE() yield a meaningful value. Also, the
+ * array is expected to be sorted in the increasing order of interrupt number.
+ */
+#define REGISTER_RAS_INTERRUPTS(_array) \
+	const struct ras_interrupt_mapping ras_interrupt_mapping = { \
+		.intrs = _array, \
+		.num_intrs = ARRAY_SIZE(_array), \
+	}
+
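+/*
+ * Illustrative usage (a minimal sketch; the interrupt numbers and the
+ * 'plat_err_records' array are hypothetical), sorted by interrupt number:
+ *
+ *   struct ras_interrupt plat_ras_interrupts[] = {
+ *       { .intr_number = 35, .err_record = &plat_err_records[0] },
+ *       { .intr_number = 36, .err_record = &plat_err_records[1] },
+ *   };
+ *   REGISTER_RAS_INTERRUPTS(plat_ras_interrupts);
+ */
+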
+#ifndef __ASSEMBLY__
+
+#include <assert.h>
+#include <ras_arch.h>
+
+struct err_record_info;
+
+struct ras_interrupt {
+	/* Interrupt number, and the associated error record info */
+	unsigned int intr_number;
+	struct err_record_info *err_record;
+	void *cookie;
+};
+
+/* Function to probe an error record group for errors */
+typedef int (*err_record_probe_t)(const struct err_record_info *info,
+		int *probe_data);
+
+/* Data passed to error record group handler */
+struct err_handler_data {
+	/* Info passed on from top-level exception handler */
+	uint64_t flags;
+	void *cookie;
+	void *handle;
+
+	/* Data structure version */
+	unsigned int version;
+
+	/* Reason for EA: one of the ERROR_* constants */
+	unsigned int ea_reason;
+
+	/*
+	 * For EAs received at vector, the value read from ESR; for an EA
+	 * synchronized by ESB, the value of DISR.
+	 */
+	uint32_t syndrome;
+
+	/* For errors signalled via interrupt, the raw interrupt ID; otherwise, 0. */
+	unsigned int interrupt;
+};
+
+/* Function to handle error from an error record group */
+typedef int (*err_record_handler_t)(const struct err_record_info *info,
+		int probe_data, const struct err_handler_data *const data);
+
+/* Error record information */
+struct err_record_info {
+	/* Function to probe error record group for errors */
+	err_record_probe_t probe;
+
+	/* Function to handle error record group errors */
+	err_record_handler_t handler;
+
+	/* Opaque group-specific data */
+	void *aux_data;
+
+	/* Additional information for Standard Error Records */
+	union {
+		struct {
+			/*
+			 * For a group accessed via memory-mapped registers, the
+			 * base address of the page hosting error records, and
+			 * the size of the record group.
+			 */
+			uintptr_t base_addr;
+
+			/* Size of group in number of KBs */
+			unsigned int size_num_k;
+		} memmap;
+
+		struct {
+			/*
+			 * For error records accessed via system registers, the
+			 * index of the first error record and the number of
+			 * records in the group.
+			 */
+			unsigned int idx_start;
+			unsigned int num_idx;
+		} sysreg;
+	};
+
+	/* Data structure version */
+	unsigned int version;
+
+	/* Error record access mechanism */
+	unsigned int access:1;
+};
+
+struct err_record_mapping {
+	struct err_record_info *err_records;
+	size_t num_err_records;
+};
+
+struct ras_interrupt_mapping {
+	struct ras_interrupt *intrs;
+	size_t num_intrs;
+};
+
+extern const struct err_record_mapping err_record_mapping;
+extern const struct ras_interrupt_mapping ras_interrupt_mapping;
+
+
+/*
+ * Helper functions to probe memory-mapped and system registers implemented in
+ * Standard Error Record format
+ */
+static inline int ras_err_ser_probe_memmap(const struct err_record_info *info,
+		int *probe_data)
+{
+	assert(info->version == ERR_HANDLER_VERSION);
+
+	return ser_probe_memmap(info->memmap.base_addr, info->memmap.size_num_k,
+		probe_data);
+}
+
+static inline int ras_err_ser_probe_sysreg(const struct err_record_info *info,
+		int *probe_data)
+{
+	assert(info->version == ERR_HANDLER_VERSION);
+
+	return ser_probe_sysreg(info->sysreg.idx_start, info->sysreg.num_idx,
+			probe_data);
+}
+
+int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
+		void *handle, uint64_t flags);
+void ras_init(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __RAS_COMMON__ */
diff --git a/include/lib/extensions/ras_arch.h b/include/lib/extensions/ras_arch.h
new file mode 100644
index 0000000..7d21053
--- /dev/null
+++ b/include/lib/extensions/ras_arch.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __RAS_H__
+#define __RAS_H__
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <context.h>
+#include <mmio.h>
+#include <stdint.h>
+
+/*
+ * Size of nodes implementing Standard Error Records - currently only 4k is
+ * supported.
+ */
+#define STD_ERR_NODE_SIZE_NUM_K		4
+
+/*
+ * Individual register offsets within an error record in Standard Error Record
+ * format when error records are accessed through memory-mapped registers.
+ */
+#define ERR_FR(n)	(0x0 + (64 * (n)))
+#define ERR_CTLR(n)	(0x8 + (64 * (n)))
+#define ERR_STATUS(n)	(0x10 + (64 * (n)))
+#define ERR_ADDR(n)	(0x18 + (64 * (n)))
+#define ERR_MISC0(n)	(0x20 + (64 * (n)))
+#define ERR_MISC1(n)	(0x28 + (64 * (n)))
+
+/* Group Status Register (ERR_STATUS) offset */
+#define ERR_GSR(base, size_num_k, n) \
+	((base) + (0x380 * (size_num_k)) + (8 * (n)))
+
+/* Management register offsets */
+#define ERR_DEVID(base, size_num_k) \
+	((base) + ((0x400 * (size_num_k)) - 0x100) + 0xc8)
+
+#define ERR_DEVID_MASK	0xffff
+
+/* Standard Error Record status register fields */
+#define ERR_STATUS_AV_SHIFT	31
+#define ERR_STATUS_AV_MASK	U(0x1)
+
+#define ERR_STATUS_V_SHIFT	30
+#define ERR_STATUS_V_MASK	U(0x1)
+
+#define ERR_STATUS_UE_SHIFT	29
+#define ERR_STATUS_UE_MASK	U(0x1)
+
+#define ERR_STATUS_ER_SHIFT	28
+#define ERR_STATUS_ER_MASK	U(0x1)
+
+#define ERR_STATUS_OF_SHIFT	27
+#define ERR_STATUS_OF_MASK	U(0x1)
+
+#define ERR_STATUS_MV_SHIFT	26
+#define ERR_STATUS_MV_MASK	U(0x1)
+
+#define ERR_STATUS_CE_SHIFT	24
+#define ERR_STATUS_CE_MASK	U(0x3)
+
+#define ERR_STATUS_DE_SHIFT	23
+#define ERR_STATUS_DE_MASK	U(0x1)
+
+#define ERR_STATUS_PN_SHIFT	22
+#define ERR_STATUS_PN_MASK	U(0x1)
+
+#define ERR_STATUS_UET_SHIFT	20
+#define ERR_STATUS_UET_MASK	U(0x3)
+
+#define ERR_STATUS_IERR_SHIFT	8
+#define ERR_STATUS_IERR_MASK	U(0xff)
+
+#define ERR_STATUS_SERR_SHIFT	0
+#define ERR_STATUS_SERR_MASK	U(0xff)
+
+#define ERR_STATUS_GET_FIELD(_status, _field) \
+	(((_status) >> ERR_STATUS_ ##_field ##_SHIFT) & ERR_STATUS_ ##_field ##_MASK)
+
+#define ERR_STATUS_CLR_FIELD(_status, _field) \
+	(_status) &= ~(ERR_STATUS_ ##_field ##_MASK << ERR_STATUS_ ##_field ##_SHIFT)
+
+#define ERR_STATUS_SET_FIELD(_status, _field, _value) \
+	(_status) |= (((_value) & ERR_STATUS_ ##_field ##_MASK) << ERR_STATUS_ ##_field ##_SHIFT)
+
+#define ERR_STATUS_WRITE_FIELD(_status, _field, _value) do { \
+		ERR_STATUS_CLR_FIELD(_status, _field); \
+		ERR_STATUS_SET_FIELD(_status, _field, _value); \
+	} while (0)
+
+
+/* Standard Error Record control register fields */
+#define ERR_CTLR_WDUI_SHIFT	11
+#define ERR_CTLR_WDUI_MASK	0x1
+
+#define ERR_CTLR_RDUI_SHIFT	10
+#define ERR_CTLR_RDUI_MASK	0x1
+#define ERR_CTLR_DUI_SHIFT	ERR_CTLR_RDUI_SHIFT
+#define ERR_CTLR_DUI_MASK	ERR_CTLR_RDUI_MASK
+
+#define ERR_CTLR_WCFI_SHIFT	9
+#define ERR_CTLR_WCFI_MASK	0x1
+
+#define ERR_CTLR_RCFI_SHIFT	8
+#define ERR_CTLR_RCFI_MASK	0x1
+#define ERR_CTLR_CFI_SHIFT	ERR_CTLR_RCFI_SHIFT
+#define ERR_CTLR_CFI_MASK	ERR_CTLR_RCFI_MASK
+
+#define ERR_CTLR_WUE_SHIFT	7
+#define ERR_CTLR_WUE_MASK	0x1
+
+#define ERR_CTLR_WFI_SHIFT	6
+#define ERR_CTLR_WFI_MASK	0x1
+
+#define ERR_CTLR_WUI_SHIFT	5
+#define ERR_CTLR_WUI_MASK	0x1
+
+#define ERR_CTLR_RUE_SHIFT	4
+#define ERR_CTLR_RUE_MASK	0x1
+#define ERR_CTLR_UE_SHIFT	ERR_CTLR_RUE_SHIFT
+#define ERR_CTLR_UE_MASK	ERR_CTLR_RUE_MASK
+
+#define ERR_CTLR_RFI_SHIFT	3
+#define ERR_CTLR_RFI_MASK	0x1
+#define ERR_CTLR_FI_SHIFT	ERR_CTLR_RFI_SHIFT
+#define ERR_CTLR_FI_MASK	ERR_CTLR_RFI_MASK
+
+#define ERR_CTLR_RUI_SHIFT	2
+#define ERR_CTLR_RUI_MASK	0x1
+#define ERR_CTLR_UI_SHIFT	ERR_CTLR_RUI_SHIFT
+#define ERR_CTLR_UI_MASK	ERR_CTLR_RUI_MASK
+
+#define ERR_CTLR_ED_SHIFT	0
+#define ERR_CTLR_ED_MASK	0x1
+
+#define ERR_CTLR_CLR_FIELD(_ctlr, _field) \
+	(_ctlr) &= ~(ERR_CTLR_ ##_field ##_MASK << ERR_CTLR_ ##_field ##_SHIFT)
+
+#define ERR_CTLR_SET_FIELD(_ctlr, _field, _value) \
+	(_ctlr) |= (((_value) & ERR_CTLR_ ##_field ##_MASK) << ERR_CTLR_ ##_field ##_SHIFT)
+
+#define ERR_CTLR_ENABLE_FIELD(_ctlr, _field) \
+	ERR_CTLR_SET_FIELD(_ctlr, _field, ERR_CTLR_ ##_field ##_MASK)
+
+/* Uncorrected error types */
+#define ERROR_STATUS_UET_UC	0x0	/* Uncontainable */
+#define ERROR_STATUS_UET_UEU	0x1	/* Unrecoverable */
+#define ERROR_STATUS_UET_UEO	0x2	/* Restartable */
+#define ERROR_STATUS_UET_UER	0x3	/* Recoverable */
+
+
+/*
+ * Standard Error Record accessors for memory-mapped registers.
+ */
+
+static inline uint64_t ser_get_feature(uintptr_t base, unsigned int idx)
+{
+	return mmio_read_64(base + ERR_FR(idx));
+}
+
+static inline uint64_t ser_get_control(uintptr_t base, unsigned int idx)
+{
+	return mmio_read_64(base + ERR_CTLR(idx));
+}
+
+static inline uint64_t ser_get_status(uintptr_t base, unsigned int idx)
+{
+	return mmio_read_64(base + ERR_STATUS(idx));
+}
+
+/*
+ * Error handling agent would write to the status register to clear an
+ * identified/handled error. Most fields in the status register are
+ * conditional write-one-to-clear.
+ *
+ * Typically, to clear the status, it suffices to write back the same value
+ * previously read. However, if there were new, higher-priority errors recorded
+ * on the node since the status was last read, writing back the read value won't
+ * clear the status. Therefore, an error handling agent must verify that the
+ * status has indeed been cleared, and retry if necessary.
+ */
+static inline void ser_set_status(uintptr_t base, unsigned int idx,
+		uint64_t status)
+{
+	mmio_write_64(base + ERR_STATUS(idx), status);
+}
+
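+/*
+ * Illustrative clear-and-verify sequence (a sketch only, not part of the API):
+ *
+ *   do {
+ *       uint64_t status = ser_get_status(base, idx);
+ *       ... decode and handle the reported error ...
+ *       ser_set_status(base, idx, status);
+ *   } while (ERR_STATUS_GET_FIELD(ser_get_status(base, idx), V) != 0);
+ */
+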
+static inline uint64_t ser_get_addr(uintptr_t base, unsigned int idx)
+{
+	return mmio_read_64(base + ERR_ADDR(idx));
+}
+
+static inline uint64_t ser_get_misc0(uintptr_t base, unsigned int idx)
+{
+	return mmio_read_64(base + ERR_MISC0(idx));
+}
+
+static inline uint64_t ser_get_misc1(uintptr_t base, unsigned int idx)
+{
+	return mmio_read_64(base + ERR_MISC1(idx));
+}
+
+
+/*
+ * Standard Error Record helpers for System registers.
+ */
+static inline void ser_sys_select_record(unsigned int idx)
+{
+	unsigned int max_idx __unused = read_erridr_el1() & ERRIDR_MASK;
+
+	assert(idx < max_idx);
+
+	write_errselr_el1(idx);
+	isb();
+}
+
+/* Library functions to probe Standard Error Record */
+int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data);
+int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_data);
+
+#endif /* __RAS_H__ */
diff --git a/include/lib/utils_def.h b/include/lib/utils_def.h
index 8abc73c..31b1294 100644
--- a/include/lib/utils_def.h
+++ b/include/lib/utils_def.h
@@ -68,6 +68,13 @@
 	(((ptr) > UINTPTR_MAX - (inc)) ? 1 : 0)
 
 /*
+ * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
+ * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
+ */
+#define check_u32_overflow(u32, inc) \
+	((u32) > (UINT32_MAX - (inc)) ? 1 : 0)
+
+/*
  * For those constants to be shared between C and other sources, apply a 'u'
  * or 'ull' suffix to the argument only in C, to avoid undefined or unintended
  * behaviour.
diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h
index 95e986b..4473b53 100644
--- a/include/plat/arm/common/arm_def.h
+++ b/include/plat/arm/common/arm_def.h
@@ -245,10 +245,16 @@
  * The number of regions like RO(code), coherent and data required by
  * different BL stages which need to be mapped in the MMU.
  */
-#if USE_COHERENT_MEM
-#define ARM_BL_REGIONS			3
+#if ENABLE_SPM && defined(IMAGE_BL31)
+# if USE_COHERENT_MEM
+#  define ARM_BL_REGIONS		5
+# else
+#  define ARM_BL_REGIONS		4
+# endif
+#elif USE_COHERENT_MEM
+# define ARM_BL_REGIONS			4
 #else
-#define ARM_BL_REGIONS			2
+# define ARM_BL_REGIONS			3
 #endif
 
 #define MAX_MMAP_REGIONS		(PLAT_ARM_MMAP_ENTRIES +	\
@@ -483,6 +489,7 @@
 #define PLAT_PERCPU_BAKERY_LOCK_SIZE		(1 * CACHE_WRITEBACK_GRANULE)
 
 /* Priority levels for ARM platforms */
+#define PLAT_RAS_PRI			0x10
 #define PLAT_SDEI_CRITICAL_PRI		0x60
 #define PLAT_SDEI_NORMAL_PRI		0x70
 
diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h
index 612a63a..fc3f4ec 100644
--- a/include/plat/arm/common/plat_arm.h
+++ b/include/plat/arm/common/plat_arm.h
@@ -59,9 +59,11 @@
 		PLAT_ARM_TZC_NS_DEV_ACCESS}
 #endif
 
-#define ARM_CASSERT_MMAP						\
-	CASSERT((ARRAY_SIZE(plat_arm_mmap) + ARM_BL_REGIONS)		\
-		<= MAX_MMAP_REGIONS,					\
+#define ARM_CASSERT_MMAP						  \
+	CASSERT((ARRAY_SIZE(plat_arm_mmap) - 1) <= PLAT_ARM_MMAP_ENTRIES, \
+		assert_plat_arm_mmap_mismatch);				  \
+	CASSERT((PLAT_ARM_MMAP_ENTRIES + ARM_BL_REGIONS)		  \
+		<= MAX_MMAP_REGIONS,					  \
 		assert_max_mmap_regions);
 
 /*
diff --git a/include/plat/arm/css/common/css_def.h b/include/plat/arm/css/common/css_def.h
index deea1bb..6d68b44 100644
--- a/include/plat/arm/css/common/css_def.h
+++ b/include/plat/arm/css/common/css_def.h
@@ -22,9 +22,6 @@
 #define CSS_DEVICE_BASE			0x20000000
 #define CSS_DEVICE_SIZE			0x0e000000
 
-#define NSRAM_BASE			0x2e000000
-#define NSRAM_SIZE			0x00008000
-
 /* System Security Control Registers */
 #define SSC_REG_BASE			0x2a420000
 #define SSC_GPRETN			(SSC_REG_BASE + 0x030)
@@ -102,7 +99,7 @@
 #define CSS_MAP_NSRAM			MAP_REGION_FLAT(		\
 						NSRAM_BASE,	\
 						NSRAM_SIZE,	\
-						MT_DEVICE | MT_RW | MT_SECURE)
+						MT_DEVICE | MT_RW | MT_NS)
 
 /* Platform ID address */
 #define SSC_VERSION_OFFSET			0x040
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
index aa181c8..cd17a00 100644
--- a/include/plat/common/platform.h
+++ b/include/plat/common/platform.h
@@ -124,6 +124,9 @@
 void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr);
 #endif
 
+void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
+		void *handle, uint64_t flags);
+
 /*
  * The following function is mandatory when the
  * firmware update feature is used.
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
index a6225d4..aee4fee 100644
--- a/lib/cpus/aarch64/denver.S
+++ b/lib/cpus/aarch64/denver.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,10 +7,136 @@
 #include <arch.h>
 #include <asm_macros.S>
 #include <assert_macros.S>
+#include <context.h>
 #include <denver.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
 
+	/* -------------------------------------------------
+	 * CVE-2017-5715 mitigation
+	 *
+	 * Flush the indirect branch predictor and RSB on
+	 * entry to EL3 by issuing a newly added instruction
+	 * for Denver CPUs.
+	 *
+	 * To achieve this without performing any branch
+	 * instruction, a per-cpu vbar is installed which
+	 * executes the workaround and then branches off to
+	 * the corresponding vector entry in the main vector
+	 * table.
+	 * -------------------------------------------------
+	 */
+	.globl	workaround_bpflush_runtime_exceptions
+
+vector_base workaround_bpflush_runtime_exceptions
+
+	.macro	apply_workaround
+	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+
+	/* -------------------------------------------------
+	 * A new write-only system register where a write of
+	 * 1 to bit 0 will cause the indirect branch predictor
+	 * and RSB to be flushed.
+	 *
+	 * A write of 0 to bit 0 will be ignored. A write of
+	 * 1 to any other bit will cause an MCA.
+	 * -------------------------------------------------
+	 */
+	mov	x0, #1
+	msr	s3_0_c15_c0_6, x0
+	isb
+
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	.endm
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_bpflush_sync_exception_sp_el0
+	b	sync_exception_sp_el0
+	check_vector_size workaround_bpflush_sync_exception_sp_el0
+
+vector_entry workaround_bpflush_irq_sp_el0
+	b	irq_sp_el0
+	check_vector_size workaround_bpflush_irq_sp_el0
+
+vector_entry workaround_bpflush_fiq_sp_el0
+	b	fiq_sp_el0
+	check_vector_size workaround_bpflush_fiq_sp_el0
+
+vector_entry workaround_bpflush_serror_sp_el0
+	b	serror_sp_el0
+	check_vector_size workaround_bpflush_serror_sp_el0
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_bpflush_sync_exception_sp_elx
+	b	sync_exception_sp_elx
+	check_vector_size workaround_bpflush_sync_exception_sp_elx
+
+vector_entry workaround_bpflush_irq_sp_elx
+	b	irq_sp_elx
+	check_vector_size workaround_bpflush_irq_sp_elx
+
+vector_entry workaround_bpflush_fiq_sp_elx
+	b	fiq_sp_elx
+	check_vector_size workaround_bpflush_fiq_sp_elx
+
+vector_entry workaround_bpflush_serror_sp_elx
+	b	serror_sp_elx
+	check_vector_size workaround_bpflush_serror_sp_elx
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_bpflush_sync_exception_aarch64
+	apply_workaround
+	b	sync_exception_aarch64
+	check_vector_size workaround_bpflush_sync_exception_aarch64
+
+vector_entry workaround_bpflush_irq_aarch64
+	apply_workaround
+	b	irq_aarch64
+	check_vector_size workaround_bpflush_irq_aarch64
+
+vector_entry workaround_bpflush_fiq_aarch64
+	apply_workaround
+	b	fiq_aarch64
+	check_vector_size workaround_bpflush_fiq_aarch64
+
+vector_entry workaround_bpflush_serror_aarch64
+	apply_workaround
+	b	serror_aarch64
+	check_vector_size workaround_bpflush_serror_aarch64
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x800
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_bpflush_sync_exception_aarch32
+	apply_workaround
+	b	sync_exception_aarch32
+	check_vector_size workaround_bpflush_sync_exception_aarch32
+
+vector_entry workaround_bpflush_irq_aarch32
+	apply_workaround
+	b	irq_aarch32
+	check_vector_size workaround_bpflush_irq_aarch32
+
+vector_entry workaround_bpflush_fiq_aarch32
+	apply_workaround
+	b	fiq_aarch32
+	check_vector_size workaround_bpflush_fiq_aarch32
+
+vector_entry workaround_bpflush_serror_aarch32
+	apply_workaround
+	b	serror_aarch32
+	check_vector_size workaround_bpflush_serror_aarch32
+
 	.global	denver_disable_dco
 
 	/* ---------------------------------------------
@@ -71,6 +197,23 @@
 
 	mov	x19, x30
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+	/*
+	 * Check if the CPU supports the special instruction
+	 * required to flush the indirect branch predictor and
+	 * RSB. Support for this operation can be determined by
+	 * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
+	 */
+	mrs	x0, id_afr0_el1
+	mov	x1, #0x10000
+	and	x0, x0, x1
+	cmp	x0, #0
+	adr	x1, workaround_bpflush_runtime_exceptions
+	mrs	x2, vbar_el3
+	csel	x0, x1, x2, ne
+	msr	vbar_el3, x0
+#endif
+
 	/* ----------------------------------------------------
 	 * Enable dynamic code optimizer (DCO)
 	 * ----------------------------------------------------
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 620ec16..121ca4d 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,8 +15,8 @@
 	.global	fpregs_context_restore
 #endif
 	.global	save_gp_registers
+	.global	restore_gp_registers
 	.global	restore_gp_registers_eret
-	.global	restore_gp_registers_callee_eret
 	.global	el3_exit
 
 /* -----------------------------------------------------
@@ -332,30 +332,50 @@
 	ret
 endfunc save_gp_registers
 
-func restore_gp_registers_eret
+/*
+ * This function restores all general purpose registers except x30 from the
+ * CPU context. x30 register must be explicitly restored by the caller.
+ */
+func restore_gp_registers
 	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-	b	restore_gp_registers_callee_eret
-endfunc restore_gp_registers_eret
-
-func restore_gp_registers_callee_eret
 	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
 	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
 	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
 	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
 	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
 	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
 	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
 	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
 	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
 	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
 	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
+	msr	sp_el0, x28
 	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-	ldp	x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-	msr	sp_el0, x17
-	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	ret
+endfunc restore_gp_registers
+
+/*
+ * Restore general purpose registers (including x30), and exit EL3 via ERET to
+ * a lower exception level.
+ */
+func restore_gp_registers_eret
+	bl	restore_gp_registers
+	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+#if IMAGE_BL31 && RAS_EXTENSION
+	/*
+	 * Issue Error Synchronization Barrier to synchronize SErrors before
+	 * exiting EL3. We're running with EAs unmasked, so any synchronized
+	 * errors would be taken immediately; therefore no need to inspect
+	 * DISR_EL1 register.
+	 */
+	esb
+#endif
 	eret
-endfunc	restore_gp_registers_callee_eret
+endfunc	restore_gp_registers_eret
 
 	/* -----------------------------------------------------
 	 * This routine assumes that the SP_EL3 is pointing to
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 2608d1f..d8267e2 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -65,7 +65,7 @@
 	uint32_t scr_el3, pmcr_el0;
 	el3_state_t *state;
 	gp_regs_t *gp_regs;
-	unsigned long sctlr_elx;
+	unsigned long sctlr_elx, actlr_elx;
 
 	assert(ctx);
 
@@ -114,6 +114,11 @@
 	scr_el3 &= ~SCR_EA_BIT;
 #endif
 
+#if FAULT_INJECTION_SUPPORT
+	/* Enable fault injection from lower ELs */
+	scr_el3 |= SCR_FIEN_BIT;
+#endif
+
 #ifdef IMAGE_BL31
 	/*
 	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ rounting as
@@ -173,6 +178,16 @@
 	 */
 	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
 
+	/*
+	 * Base the context ACTLR_EL1 on the current value, as it is
+	 * implementation defined. The context restore process will write
+	 * the value from the context to the actual register and can cause
+	 * problems for processor cores that don't expect certain bits to
+	 * be zero.
+	 */
+	actlr_elx = read_actlr_el1();
+	write_ctx_reg((get_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
+
 	if (security_state == SECURE) {
 		/*
 		 * Initialise PMCR_EL0 for secure context only, setting all
diff --git a/lib/extensions/ras/ras_common.c b/lib/extensions/ras/ras_common.c
new file mode 100644
index 0000000..0335a7b
--- /dev/null
+++ b/lib/extensions/ras/ras_common.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <ea_handle.h>
+#include <ehf.h>
+#include <platform.h>
+#include <ras.h>
+#include <ras_arch.h>
+
+#ifndef PLAT_RAS_PRI
+# error Platform must define RAS priority value
+#endif
+
+/* Handler that receives External Aborts on RAS-capable systems */
+int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
+		void *handle, uint64_t flags)
+{
+	unsigned int i, n_handled = 0, ret;
+	int probe_data;
+	struct err_record_info *info;
+
+	const struct err_handler_data err_data = {
+		.version = ERR_HANDLER_VERSION,
+		.ea_reason = ea_reason,
+		.interrupt = 0,
+		.syndrome = syndrome,
+		.flags = flags,
+		.cookie = cookie,
+		.handle = handle
+	};
+
+	for_each_err_record_info(i, info) {
+		assert(info->probe != NULL);
+		assert(info->handler != NULL);
+
+		/* Continue probing until the record group signals no error */
+		while (1) {
+			if (info->probe(info, &probe_data) == 0)
+				break;
+
+			/* Handle error */
+			ret = info->handler(info, probe_data, &err_data);
+			if (ret != 0)
+				return ret;
+
+			n_handled++;
+		}
+	}
+
+	return (n_handled != 0);
+}
+
+#if ENABLE_ASSERTIONS
+static void assert_interrupts_sorted(void)
+{
+	unsigned int i, last;
+	struct ras_interrupt *start = ras_interrupt_mapping.intrs;
+
+	if (ras_interrupt_mapping.num_intrs == 0)
+		return;
+
+	last = start[0].intr_number;
+	for (i = 1; i < ras_interrupt_mapping.num_intrs; i++) {
+		assert(start[i].intr_number > last);
+		last = start[i].intr_number;
+	}
+}
+#endif
+
+/*
+ * Given a RAS interrupt number, locate the registered handler and call it. If
+ * no handler is found for the interrupt number, this function panics.
+ */
+static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
+		void *handle, void *cookie)
+{
+	struct ras_interrupt *ras_inrs = ras_interrupt_mapping.intrs;
+	struct ras_interrupt *selected = NULL;
+	int start, end, mid, probe_data, ret __unused;
+
+	const struct err_handler_data err_data = {
+		.version = ERR_HANDLER_VERSION,
+		.interrupt = intr_raw,
+		.flags = flags,
+		.cookie = cookie,
+		.handle = handle
+	};
+
+	assert(ras_interrupt_mapping.num_intrs > 0);
+
+	start = 0;
+	end = ras_interrupt_mapping.num_intrs - 1;
+	while (start <= end) {
+		mid = ((end + start) / 2);
+		if (intr_raw == ras_inrs[mid].intr_number) {
+			selected = &ras_inrs[mid];
+			break;
+		} else if (intr_raw < ras_inrs[mid].intr_number) {
+			/* Move left */
+			end = mid - 1;
+		} else {
+			/* Move right */
+			start = mid + 1;
+		}
+	}
+
+	if (selected == NULL) {
+		ERROR("RAS interrupt %u has no handler!\n", intr_raw);
+		panic();
+	}
+
+
+	ret = selected->err_record->probe(selected->err_record, &probe_data);
+	assert(ret != 0);
+
+	/* Call error handler for the record group */
+	assert(selected->err_record->handler != NULL);
+	selected->err_record->handler(selected->err_record, probe_data,
+			&err_data);
+
+	return 0;
+}
+
+void ras_init(void)
+{
+#if ENABLE_ASSERTIONS
+	/* Check RAS interrupts are sorted */
+	assert_interrupts_sorted();
+#endif
+
+	/* Register RAS priority handler */
+	ehf_register_priority_handler(PLAT_RAS_PRI, ras_interrupt_handler);
+}
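The dispatch code above implies the shape of the probe and handler callbacks: probe() returns non-zero and fills *probe_data when its record group has an error, and handler() consumes one error, with a non-zero return on the EA path stopping further probing. A hedged sketch under those assumptions; the exact prototypes and any const-qualification live in ras.h and may differ.

#include <ras.h>

/* Hypothetical probe: report no pending error in this sketch */
static int hyp_probe(const struct err_record_info *info, int *probe_data)
{
	(void)info;
	*probe_data = 0;
	return 0;
}

/* Hypothetical handler: acknowledge/clear the record indicated by probe_data */
static int hyp_handler(const struct err_record_info *info, int probe_data,
		const struct err_handler_data *data)
{
	(void)info;
	(void)probe_data;
	(void)data;
	return 0;	/* 0: let ras_ea_handler() continue with other record groups */
}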
diff --git a/lib/extensions/ras/std_err_record.c b/lib/extensions/ras/std_err_record.c
new file mode 100644
index 0000000..65c007f
--- /dev/null
+++ b/lib/extensions/ras/std_err_record.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <ras_arch.h>
+#include <utils_def.h>
+
+/*
+ * Probe for errors in memory-mapped registers containing error records in the
+ * Standard Error Record format. Upon detecting an error, set probe data to the
+ * index of the record in error, and return 1; otherwise, return 0.
+ */
+int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data)
+{
+	int num_records, num_group_regs, i;
+	uint64_t gsr;
+
+	assert(base != 0);
+
+	/* Only 4K supported for now */
+	assert(size_num_k == STD_ERR_NODE_SIZE_NUM_K);
+
+	num_records = (mmio_read_32(ERR_DEVID(base, size_num_k)) & ERR_DEVID_MASK);
+
+	/* A group register shows error status for 2^6 error records */
+	num_group_regs = (num_records >> 6) + 1;
+
+	/* Iterate through group registers to find a record in error */
+	for (i = 0; i < num_group_regs; i++) {
+		gsr = mmio_read_64(ERR_GSR(base, size_num_k, i));
+		if (gsr == 0)
+			continue;
+
+		/* Return the index of the record in error */
+		if (probe_data != NULL)
+			*probe_data = ((i << 6) + __builtin_ctzll(gsr));
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Probe for error in System Registers where error records are implemented in
+ * Standard Error Record format. Upon detecting an error, set probe data to the
+ * index of the record in error, and return 1; otherwise, return 0.
+ */
+int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_data)
+{
+	int i;
+	uint64_t status;
+	unsigned int max_idx __unused = read_erridr_el1() & ERRIDR_MASK;
+
+	assert(idx_start < max_idx);
+	assert(check_u32_overflow(idx_start, num_idx) == 0);
+	assert((idx_start + num_idx - 1) < max_idx);
+
+	for (i = 0; i < num_idx; i++) {
+		/* Select the error record */
+		ser_sys_select_record(idx_start + i);
+
+		/* Retrieve status register from the error record */
+		status = read_erxstatus_el1();
+
+		/* Check for valid field in status */
+		if (ERR_STATUS_GET_FIELD(status, V)) {
+			if (probe_data != NULL)
+				*probe_data = i;
+			return 1;
+		}
+	}
+
+	return 0;
+}
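A worked example of the index computation in ser_probe_memmap(): each group status register covers 64 records, so the record in error is the group base (i * 64) plus the position of the lowest set bit in that group's status value.

#include <assert.h>
#include <stdint.h>

static void gsr_index_example(void)
{
	int group = 1;		/* second group status register */
	uint64_t gsr = 0x14;	/* records 2 and 4 of this group are in error */

	/* 64 + 2 = 66: the first record in error across the node */
	int idx = (group << 6) + __builtin_ctzll(gsr);

	assert(idx == 66);
}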
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 9e95cd5..4bbff03 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -76,6 +76,9 @@
 # Build flag to treat usage of deprecated platform and framework APIs as error.
 ERROR_DEPRECATED		:= 0
 
+# Fault injection support
+FAULT_INJECTION_SUPPORT		:= 0
+
 # Byte alignment that each component in FIP is aligned to
 FIP_ALIGN			:= 0
 
@@ -92,6 +95,10 @@
 # default, they are for Secure EL1.
 GICV2_G0_FOR_EL3		:= 0
 
+# Route External Aborts to EL3. Disabled by default; External Aborts are handled
+# by lower ELs.
+HANDLE_EA_EL3_FIRST		:= 0
+
 # Whether system coherency is managed in hardware, without explicit software
 # operations.
 HW_ASSISTED_COHERENCY		:= 0
@@ -120,6 +127,9 @@
 # Original format.
 PSCI_EXTENDED_STATE_ID		:= 0
 
+# Enable RAS support
+RAS_EXTENSION			:= 0
+
 # By default, BL1 acts as the reset handler, not BL31
 RESET_TO_BL31			:= 0
 
diff --git a/plat/arm/board/juno/include/platform_def.h b/plat/arm/board/juno/include/platform_def.h
index c834941..e616e1f 100644
--- a/plat/arm/board/juno/include/platform_def.h
+++ b/plat/arm/board/juno/include/platform_def.h
@@ -56,6 +56,9 @@
 /* Use the bypass address */
 #define PLAT_ARM_TRUSTED_ROM_BASE	V2M_FLASH0_BASE + BL1_ROM_BYPASS_OFFSET
 
+#define NSRAM_BASE			0x2e000000
+#define NSRAM_SIZE			0x00008000	/* 32KB */
+
 /* virtual address used by dynamic mem_protect for chunk_base */
 #define PLAT_ARM_MEM_PROTEC_VA_FRAME	0xc0000000
 
diff --git a/plat/arm/common/aarch64/arm_ehf.c b/plat/arm/common/aarch64/arm_ehf.c
index 785b7bb..665871b 100644
--- a/plat/arm/common/aarch64/arm_ehf.c
+++ b/plat/arm/common/aarch64/arm_ehf.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,6 +11,11 @@
  * Enumeration of priority levels on ARM platforms.
  */
 ehf_pri_desc_t arm_exceptions[] = {
+#if RAS_EXTENSION
+	/* RAS Priority */
+	EHF_PRI_DESC(ARM_PRI_BITS, PLAT_RAS_PRI),
+#endif
+
 #if SDEI_SUPPORT
 	/* Critical priority SDEI */
 	EHF_PRI_DESC(ARM_PRI_BITS, PLAT_SDEI_CRITICAL_PRI),
diff --git a/plat/arm/common/aarch64/arm_ras.c b/plat/arm/common/aarch64/arm_ras.c
new file mode 100644
index 0000000..80dfaf1
--- /dev/null
+++ b/plat/arm/common/aarch64/arm_ras.c
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <ras.h>
+
+struct ras_interrupt arm_ras_interrupts[] = {
+};
+
+struct err_record_info arm_err_records[] = {
+};
+
+REGISTER_ERR_RECORD_INFO(arm_err_records);
+REGISTER_RAS_INTERRUPTS(arm_ras_interrupts);
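The ARM arrays above are deliberately left empty; a hedged sketch of how a platform port might populate them, reusing the hypothetical hyp_probe/hyp_handler callbacks sketched earlier. Field names beyond those used by ras_common.c (probe, handler, intr_number, err_record) are assumptions; ras.h provides the real structure layout and any initializer macros. The interrupt array must be sorted by ascending intr_number, as checked by assert_interrupts_sorted().

#include <ras.h>

static struct err_record_info plat_err_records[] = {
	{
		.probe = hyp_probe,		/* must be non-NULL */
		.handler = hyp_handler,		/* must be non-NULL */
	},
};

/* Sorted by ascending intr_number, one entry per RAS error interrupt */
static struct ras_interrupt plat_ras_interrupts[] = {
	{ .intr_number = 32, .err_record = &plat_err_records[0] },
	{ .intr_number = 33, .err_record = &plat_err_records[0] },
};

REGISTER_ERR_RECORD_INFO(plat_err_records);
REGISTER_RAS_INTERRUPTS(plat_ras_interrupts);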
diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c
index 3c70f9d..551e700 100644
--- a/plat/arm/common/arm_bl31_setup.c
+++ b/plat/arm/common/arm_bl31_setup.c
@@ -14,6 +14,7 @@
 #include <mmio.h>
 #include <plat_arm.h>
 #include <platform.h>
+#include <ras.h>
 
 #define BL31_END (uintptr_t)(&__BL31_END__)
 
@@ -80,7 +81,7 @@
 	assert(from_bl2 == NULL);
 	assert(plat_params_from_bl2 == NULL);
 
-#ifdef BL32_BASE
+# ifdef BL32_BASE
 	/* Populate entry point information for BL32 */
 	SET_PARAM_HEAD(&bl32_image_ep_info,
 				PARAM_EP,
@@ -89,7 +90,7 @@
 	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
 	bl32_image_ep_info.pc = BL32_BASE;
 	bl32_image_ep_info.spsr = arm_get_spsr_for_bl32_entry();
-#endif /* BL32_BASE */
+# endif /* BL32_BASE */
 
 	/* Populate entry point information for BL33 */
 	SET_PARAM_HEAD(&bl33_image_ep_info,
@@ -105,6 +106,19 @@
 	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
 	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
 
+# if ARM_LINUX_KERNEL_AS_BL33
+	/*
+	 * According to the file ``Documentation/arm64/booting.txt`` of the
+	 * Linux kernel tree, Linux expects the physical address of the device
+	 * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
+	 * must be 0.
+	 */
+	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE;
+	bl33_image_ep_info.args.arg1 = 0U;
+	bl33_image_ep_info.args.arg2 = 0U;
+	bl33_image_ep_info.args.arg3 = 0U;
+# endif
+
 #else /* RESET_TO_BL31 */
 
 	/*
@@ -221,6 +235,10 @@
 
 	/* Initialize power controller before setting up topology */
 	plat_arm_pwrc_setup();
+
+#if RAS_EXTENSION
+	ras_init();
+#endif
 }
 
 /*******************************************************************************
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
index 015e454..1218548 100644
--- a/plat/arm/common/arm_common.mk
+++ b/plat/arm/common/arm_common.mk
@@ -80,6 +80,27 @@
 $(eval $(call assert_boolean,ARM_XLAT_TABLES_LIB_V1))
 $(eval $(call add_define,ARM_XLAT_TABLES_LIB_V1))
 
+# Don't have the Linux kernel as a BL33 image by default
+ARM_LINUX_KERNEL_AS_BL33	:=	0
+$(eval $(call assert_boolean,ARM_LINUX_KERNEL_AS_BL33))
+$(eval $(call add_define,ARM_LINUX_KERNEL_AS_BL33))
+
+ifeq (${ARM_LINUX_KERNEL_AS_BL33},1)
+  ifneq (${ARCH},aarch64)
+    $(error "ARM_LINUX_KERNEL_AS_BL33 is only available in AArch64.")
+  endif
+  ifneq (${RESET_TO_BL31},1)
+    $(error "ARM_LINUX_KERNEL_AS_BL33 is only available if RESET_TO_BL31=1.")
+  endif
+  ifndef PRELOADED_BL33_BASE
+    $(error "PRELOADED_BL33_BASE must be set if ARM_LINUX_KERNEL_AS_BL33 is used.")
+  endif
+  ifndef ARM_PRELOADED_DTB_BASE
+    $(error "ARM_PRELOADED_DTB_BASE must be set if ARM_LINUX_KERNEL_AS_BL33 is used.")
+  endif
+  $(eval $(call add_define,ARM_PRELOADED_DTB_BASE))
+endif
+
 # Use an implementation of SHA-256 with a smaller memory footprint but reduced
 # speed.
 $(eval $(call add_define,MBEDTLS_SHA256_SMALLER))
@@ -203,6 +224,13 @@
 BL31_SOURCES		+=	plat/arm/common/aarch64/arm_sdei.c
 endif
 
+# RAS sources
+ifeq (${RAS_EXTENSION},1)
+BL31_SOURCES		+=	lib/extensions/ras/std_err_record.c		\
+				lib/extensions/ras/ras_common.c			\
+				plat/arm/common/aarch64/arm_ras.c
+endif
+
 ifneq (${TRUSTED_BOARD_BOOT},0)
 
     # Include common TBB sources
diff --git a/plat/arm/css/sgi/include/platform_def.h b/plat/arm/css/sgi/include/platform_def.h
index 62f4059..49a33ad 100644
--- a/plat/arm/css/sgi/include/platform_def.h
+++ b/plat/arm/css/sgi/include/platform_def.h
@@ -52,6 +52,9 @@
 #define PLAT_ARM_TRUSTED_ROM_BASE	0x0
 #define PLAT_ARM_TRUSTED_ROM_SIZE	0x00080000	/* 512KB */
 
+#define PLAT_ARM_NSRAM_BASE		0x06000000
+#define PLAT_ARM_NSRAM_SIZE		0x00080000	/* 512KB */
+
 #define PLAT_MAX_PWR_LVL		1
 
 #define PLAT_ARM_G1S_IRQS		ARM_G1S_IRQS,			\
diff --git a/plat/common/aarch64/plat_common.c b/plat/common/aarch64/plat_common.c
index 7a2f38c..409ae55 100644
--- a/plat/common/aarch64/plat_common.c
+++ b/plat/common/aarch64/plat_common.c
@@ -8,6 +8,9 @@
 #include <assert.h>
 #include <console.h>
 #include <platform.h>
+#if RAS_EXTENSION
+#include <ras.h>
+#endif
 #include <xlat_mmu_helpers.h>
 
 /*
@@ -28,6 +31,8 @@
 #pragma weak plat_sdei_validate_entry_point
 #endif
 
+#pragma weak plat_ea_handler
+
 void bl31_plat_enable_mmu(uint32_t flags)
 {
 	enable_mmu_el3(flags);
@@ -105,3 +110,20 @@
 	return 0;
 }
 #endif
+
+/* RAS functions common to AArch64 ARM platforms */
+void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
+		void *handle, uint64_t flags)
+{
+#if RAS_EXTENSION
+	/* Call RAS EA handler */
+	int handled = ras_ea_handler(ea_reason, syndrome, cookie, handle, flags);
+	if (handled != 0)
+		return;
+#endif
+
+	ERROR("Unhandled External Abort received on 0x%lx at EL3!\n",
+			read_mpidr_el1());
+	ERROR(" exception reason=%u syndrome=0x%llx\n", ea_reason, syndrome);
+	panic();
+}
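Because plat_ea_handler is declared weak above, a platform can provide its own version, for instance to attempt containment before giving up. A hedged sketch; plat_try_recover_ea() is a hypothetical helper that always fails here.

#include <debug.h>
#include <platform.h>
#include <stdint.h>

/* Hypothetical platform-specific recovery; 0 on success, non-zero otherwise */
static int plat_try_recover_ea(unsigned int ea_reason, uint64_t syndrome)
{
	(void)ea_reason;
	(void)syndrome;
	return -1;
}

void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
		void *handle, uint64_t flags)
{
	(void)cookie;
	(void)handle;
	(void)flags;

	if (plat_try_recover_ea(ea_reason, syndrome) == 0)
		return;		/* error contained; resume the interrupted context */

	ERROR("Fatal External Abort: reason=%u syndrome=0x%llx\n",
			ea_reason, syndrome);
	panic();
}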