Merge pull request #1379 from CJKay/nsram-fix

Fix incorrect NSRAM memory map region for SGI-575
diff --git a/Makefile b/Makefile
index 1708a4c..b7116a7 100644
--- a/Makefile
+++ b/Makefile
@@ -387,6 +387,20 @@
     endif
 endif
 
+# For RAS_EXTENSION, require that EAs are handled in EL3 first
+ifeq ($(RAS_EXTENSION),1)
+    ifneq ($(HANDLE_EA_EL3_FIRST),1)
+        $(error For RAS_EXTENSION, HANDLE_EA_EL3_FIRST must also be 1)
+    endif
+endif
+
+# When FAULT_INJECTION_SUPPORT is used, require that RAS_EXTENSION is enabled
+ifeq ($(FAULT_INJECTION_SUPPORT),1)
+    ifneq ($(RAS_EXTENSION),1)
+        $(error For FAULT_INJECTION_SUPPORT, RAS_EXTENSION must also be 1)
+    endif
+endif
+
 ################################################################################
 # Process platform overrideable behaviour
 ################################################################################
@@ -514,8 +528,10 @@
 $(eval $(call assert_boolean,ENABLE_SPM))
 $(eval $(call assert_boolean,ENABLE_SVE_FOR_NS))
 $(eval $(call assert_boolean,ERROR_DEPRECATED))
+$(eval $(call assert_boolean,FAULT_INJECTION_SUPPORT))
 $(eval $(call assert_boolean,GENERATE_COT))
 $(eval $(call assert_boolean,GICV2_G0_FOR_EL3))
+$(eval $(call assert_boolean,HANDLE_EA_EL3_FIRST))
 $(eval $(call assert_boolean,HW_ASSISTED_COHERENCY))
 $(eval $(call assert_boolean,LOAD_IMAGE_V2))
 $(eval $(call assert_boolean,MULTI_CONSOLE_API))
@@ -523,6 +539,7 @@
 $(eval $(call assert_boolean,PL011_GENERIC_UART))
 $(eval $(call assert_boolean,PROGRAMMABLE_RESET_ADDRESS))
 $(eval $(call assert_boolean,PSCI_EXTENDED_STATE_ID))
+$(eval $(call assert_boolean,RAS_EXTENSION))
 $(eval $(call assert_boolean,RESET_TO_BL31))
 $(eval $(call assert_boolean,SAVE_KEYS))
 $(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA))
@@ -561,7 +578,9 @@
 $(eval $(call add_define,ENABLE_SPM))
 $(eval $(call add_define,ENABLE_SVE_FOR_NS))
 $(eval $(call add_define,ERROR_DEPRECATED))
+$(eval $(call add_define,FAULT_INJECTION_SUPPORT))
 $(eval $(call add_define,GICV2_G0_FOR_EL3))
+$(eval $(call add_define,HANDLE_EA_EL3_FIRST))
 $(eval $(call add_define,HW_ASSISTED_COHERENCY))
 $(eval $(call add_define,LOAD_IMAGE_V2))
 $(eval $(call add_define,LOG_LEVEL))
@@ -571,6 +590,7 @@
 $(eval $(call add_define,PLAT_${PLAT}))
 $(eval $(call add_define,PROGRAMMABLE_RESET_ADDRESS))
 $(eval $(call add_define,PSCI_EXTENDED_STATE_ID))
+$(eval $(call add_define,RAS_EXTENSION))
 $(eval $(call add_define,RESET_TO_BL31))
 $(eval $(call add_define,SEPARATE_CODE_AND_RODATA))
 $(eval $(call add_define,SMCCC_MAJOR_VERSION))
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 1c3ed3f..346cd3b 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -8,6 +8,7 @@
 #include <asm_macros.S>
 #include <context.h>
 #include <cpu_data.h>
+#include <ea_handle.h>
 #include <interrupt_mgmt.h>
 #include <platform_def.h>
 #include <runtime_svc.h>
@@ -35,17 +36,77 @@
 	.globl	fiq_aarch32
 	.globl	serror_aarch32
 
+	/*
+	 * Macro that prepares entry to EL3 upon taking an exception.
+	 *
+	 * With RAS_EXTENSION, this macro synchronizes pending errors with an ESB
+	 * instruction. When an error is thus synchronized, the handling is
+	 * delegated to the platform EA handler.
+	 *
+	 * Without RAS_EXTENSION, this macro just saves x30, and unmasks
+	 * Asynchronous External Aborts.
+	 */
+	.macro check_and_unmask_ea
+#if RAS_EXTENSION
+	/* Synchronize pending External Aborts */
+	esb
+
+	/* Unmask the SError interrupt */
+	msr	daifclr, #DAIF_ABT_BIT
+
+	/*
+	 * Explicitly save x30 so as to free up a register and to enable
+	 * branching
+	 */
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+	/* Check for SErrors synchronized by the ESB instruction */
+	mrs	x30, DISR_EL1
+	tbz	x30, #DISR_A_BIT, 1f
+
+	/* Save GP registers and restore them afterwards */
+	bl	save_gp_registers
+	mov	x0, #ERROR_EA_ESB
+	mrs	x1, DISR_EL1
+	bl	delegate_ea
+	bl	restore_gp_registers
+
+1:
+#else
+	/* Unmask the SError interrupt */
+	msr	daifclr, #DAIF_ABT_BIT
+
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+#endif
+	.endm
+
+	/*
+	 * Handle External Abort by delegating to the platform's EA handler.
+	 * Once the platform handler returns, the macro exits EL3 and returns to
+	 * where the abort was taken from.
+	 *
+	 * This macro assumes that x30 is available for use.
+	 *
+	 * 'abort_type' is a constant passed to the platform handler, indicating
+	 * the cause of the External Abort.
+	 */
+	.macro handle_ea abort_type
+	/* Save GP registers */
+	bl	save_gp_registers
+
+	/* Setup exception class and syndrome arguments for platform handler */
+	mov	x0, \abort_type
+	mrs	x1, esr_el3
+	adr	x30, el3_exit
+	b	delegate_ea
+	.endm
+
 	/* ---------------------------------------------------------------------
 	 * This macro handles Synchronous exceptions.
 	 * Only SMC exceptions are supported.
 	 * ---------------------------------------------------------------------
 	 */
 	.macro	handle_sync_exception
-	/* Enable the SError interrupt */
-	msr	daifclr, #DAIF_ABT_BIT
-
-	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-
 #if ENABLE_RUNTIME_INSTRUMENTATION
 	/*
 	 * Read the timestamp value and store it in per-cpu data. The value
@@ -69,6 +130,20 @@
 	cmp	x30, #EC_AARCH64_SMC
 	b.eq	smc_handler64
 
+	/* Check for I/D aborts from lower EL */
+	cmp	x30, #EC_IABORT_LOWER_EL
+	b.eq	1f
+
+	cmp	x30, #EC_DABORT_LOWER_EL
+	b.ne	2f
+
+1:
+	/* Test for EA bit in the instruction syndrome */
+	mrs	x30, esr_el3
+	tbz	x30, #ESR_ISS_EABORT_EA_BIT, 2f
+	handle_ea #ERROR_EA_SYNC
+
+2:
 	/* Other kinds of synchronous exceptions are not handled */
 	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 	b	report_unhandled_exception
@@ -81,12 +156,7 @@
 	 * ---------------------------------------------------------------------
 	 */
 	.macro	handle_interrupt_exception label
-	/* Enable the SError interrupt */
-	msr	daifclr, #DAIF_ABT_BIT
-
-	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 	bl	save_gp_registers
-
 	/* Save the EL3 system registers needed to return from this exception */
 	mrs	x0, spsr_el3
 	mrs	x1, elr_el3
@@ -154,25 +224,6 @@
 	.endm
 
 
-	.macro save_x4_to_x29_sp_el0
-	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
-	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
-	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
-	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
-	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
-	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
-	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
-	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
-	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
-	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
-	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-	mrs	x18, sp_el0
-	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
-	.endm
-
-
 vector_base runtime_exceptions
 
 	/* ---------------------------------------------------------------------
@@ -239,23 +290,29 @@
 	 * to a valid cpu context where the general purpose and system register
 	 * state can be saved.
 	 */
+	check_and_unmask_ea
 	handle_sync_exception
 	check_vector_size sync_exception_aarch64
 
 vector_entry irq_aarch64
+	check_and_unmask_ea
 	handle_interrupt_exception irq_aarch64
 	check_vector_size irq_aarch64
 
 vector_entry fiq_aarch64
+	check_and_unmask_ea
 	handle_interrupt_exception fiq_aarch64
 	check_vector_size fiq_aarch64
 
 vector_entry serror_aarch64
+	msr	daifclr, #DAIF_ABT_BIT
+
 	/*
-	 * SError exceptions from lower ELs are not currently supported.
-	 * Report their occurrence.
+	 * Explicitly save x30 so as to free up a register and to enable
+	 * branching
 	 */
-	b	report_unhandled_exception
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	handle_ea #ERROR_EA_ASYNC
 	check_vector_size serror_aarch64
 
 	/* ---------------------------------------------------------------------
@@ -269,23 +326,29 @@
 	 * to a valid cpu context where the general purpose and system register
 	 * state can be saved.
 	 */
+	check_and_unmask_ea
 	handle_sync_exception
 	check_vector_size sync_exception_aarch32
 
 vector_entry irq_aarch32
+	check_and_unmask_ea
 	handle_interrupt_exception irq_aarch32
 	check_vector_size irq_aarch32
 
 vector_entry fiq_aarch32
+	check_and_unmask_ea
 	handle_interrupt_exception fiq_aarch32
 	check_vector_size fiq_aarch32
 
 vector_entry serror_aarch32
+	msr	daifclr, #DAIF_ABT_BIT
+
 	/*
-	 * SError exceptions from lower ELs are not currently supported.
-	 * Report their occurrence.
+	 * Explicitly save x30 so as to free up a register and to enable
+	 * branching
 	 */
-	b	report_unhandled_exception
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	handle_ea #ERROR_EA_ASYNC
 	check_vector_size serror_aarch32
 
 
@@ -345,7 +408,21 @@
 	 *
 	 * Save x4-x29 and sp_el0.
 	 */
-	save_x4_to_x29_sp_el0
+	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+	mrs	x18, sp_el0
+	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
 
 	mov	x5, xzr
 	mov	x6, sp
@@ -431,14 +508,12 @@
 
 smc_unknown:
 	/*
-	 * Here we restore x4-x18 regardless of where we came from. AArch32
-	 * callers will find the registers contents unchanged, but AArch64
-	 * callers will find the registers modified (with stale earlier NS
-	 * content). Either way, we aren't leaking any secure information
-	 * through them.
+	 * Unknown SMC call. Populate return value with SMC_UNK, restore
+	 * GP registers, and return to caller.
 	 */
 	mov	x0, #SMC_UNK
-	b	restore_gp_registers_callee_eret
+	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	b	restore_gp_registers_eret
 
 smc_prohibited:
 	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
@@ -450,3 +525,62 @@
 	msr	spsel, #1
 	no_ret	report_unhandled_exception
 endfunc smc_handler
+
+/*
+ * Delegate External Abort handling to the platform's EA handler. This function
+ * assumes that all GP registers have been saved by the caller.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ */
+func delegate_ea
+	/* Save EL3 state */
+	mrs	x2, spsr_el3
+	mrs	x3, elr_el3
+	stp	x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+
+	/*
+	 * Save ESR as handling might involve lower ELs, and returning back to
+	 * EL3 from there would trample the original ESR.
+	 */
+	mrs	x4, scr_el3
+	mrs	x5, esr_el3
+	stp	x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+
+	/*
+	 * Setup rest of arguments, and call platform External Abort handler.
+	 *
+	 * x0: EA reason (already in place)
+	 * x1: Exception syndrome (already in place).
+	 * x2: Cookie (unused for now).
+	 * x3: Context pointer.
+	 * x4: Flags (security state from SCR for now).
+	 */
+	mov	x2, xzr
+	mov	x3, sp
+	ubfx	x4, x4, #0, #1
+
+	/* Switch to runtime stack */
+	ldr	x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+	msr	spsel, #0
+	mov	sp, x5
+
+	mov	x29, x30
+	bl	plat_ea_handler
+	mov	x30, x29
+
+	/* Make SP point to context */
+	msr	spsel, #1
+
+	/* Restore EL3 state */
+	ldp	x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+	msr	spsr_el3, x1
+	msr	elr_el3, x2
+
+	/* Restore ESR_EL3 and SCR_EL3 */
+	ldp	x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+	msr	scr_el3, x3
+	msr	esr_el3, x4
+
+	ret
+endfunc delegate_ea
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index aed54a6..a1719ef 100644
--- a/docs/user-guide.rst
+++ b/docs/user-guide.rst
@@ -390,6 +390,14 @@
    handled at EL3, and a panic will result. This is supported only for AArch64
    builds.
 
+-  ``FAULT_INJECTION_SUPPORT``: ARMv8.4 extensions introduced support for fault
+   injection from lower ELs, and this build option enables lower ELs to use
+   Error Records accessed via System Registers to inject faults. This is
+   applicable only to AArch64 builds.
+
+   This feature is intended for testing purposes only, and it is advisable to
+   keep it disabled for production images.
+
 -  ``FIP_NAME``: This is an optional build option which specifies the FIP
    filename for the ``fip`` target. Default is ``fip.bin``.
 
@@ -531,6 +539,15 @@
    smc function id. When this option is enabled on Arm platforms, the
    option ``ARM_RECOM_STATE_ID_ENC`` needs to be set to 1 as well.
 
+-  ``RAS_EXTENSION``: When set to ``1``, enable Armv8.2 RAS features. RAS features
+   are an optional extension for pre-Armv8.2 CPUs, but are mandatory for Armv8.2
+   or later CPUs.
+
+   When ``RAS_EXTENSION`` is set to ``1``, ``HANDLE_EA_EL3_FIRST`` must also be
+   set to ``1``.
+
+   This option is disabled by default.
+
 -  ``RESET_TO_BL31``: Enable BL31 entrypoint as the CPU reset vector instead
    of the BL1 entrypoint. It can take the value 0 (CPU reset to BL1
    entrypoint) or 1 (CPU reset to BL31 entrypoint).
diff --git a/drivers/console/aarch64/multi_console.S b/drivers/console/aarch64/multi_console.S
index 15c3ba4..a85a6a5 100644
--- a/drivers/console/aarch64/multi_console.S
+++ b/drivers/console/aarch64/multi_console.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,7 +10,8 @@
 
 	.globl	console_register
 	.globl	console_unregister
-	.globl  console_set_scope
+	.globl	console_is_registered
+	.globl	console_set_scope
 	.globl	console_switch_state
 	.globl	console_putc
 	.globl	console_getc
@@ -38,13 +39,15 @@
 	 * persistent memory (e.g. the data section).
 	 * In : x0 - address of console_t structure
 	 * Out: x0 - Always 1 (for easier tail calling)
-	 * Clobber list: x0, x1, x14
+	 * Clobber list: x0, x1, x14, x15
 	 * -----------------------------------------------
 	 */
 func console_register
 #if ENABLE_ASSERTIONS
+	/* Assert that x0 isn't a NULL pointer */
 	cmp	x0, #0
 	ASM_ASSERT(ne)
+	/* Assert that the struct isn't on the stack */
 	adrp	x1, __STACKS_START__
 	add	x1, x1, :lo12:__STACKS_START__
 	cmp	x0, x1
@@ -54,6 +57,14 @@
 	cmp	x0, x1
 	ASM_ASSERT(hs)
 not_on_stack:
+	/* Assert that this struct isn't in the list */
+	mov	x1, x0 /* Preserve x0 and x30 */
+	mov	x15, x30
+	bl	console_is_registered
+	cmp	x0, #0
+	ASM_ASSERT(eq)
+	mov	x30, x15
+	mov	x0, x1
 #endif /* ENABLE_ASSERTIONS */
 	adrp	x14, console_list
 	ldr	x1, [x14, :lo12:console_list]	/* X1 = first struct in list */
@@ -73,6 +84,11 @@
 	 * -----------------------------------------------
 	 */
 func console_unregister
+#if ENABLE_ASSERTIONS
+	/* Assert that x0 isn't a NULL pointer */
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
 	adrp	x14, console_list
 	add	x14, x14, :lo12:console_list	/* X14 = ptr to first struct */
 	ldr	x1, [x14]			/* X1 = first struct */
@@ -96,6 +112,37 @@
 endfunc console_unregister
 
 	/* -----------------------------------------------
+	 * int console_is_registered(console_t *console)
+	 * Function to detect if a specific console is
+	 * registered or not.
+	 * In: x0 - address of console_t struct to check
+	 * Out: x0 - 1 if it is registered, 0 if not.
+	 * Clobber list: x0, x14
+	 * -----------------------------------------------
+	 */
+func console_is_registered
+#if ENABLE_ASSERTIONS
+	/* Assert that x0 isn't a NULL pointer */
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
+	adrp	x14, console_list
+	ldr	x14, [x14, :lo12:console_list]	/* X14 = first console struct */
+check_registered_loop:
+	cbz	x14, console_not_registered /* Check if end of list */
+	cmp	x0, x14		/* Check if the pointers are different */
+	b.eq	console_registered
+	ldr	x14, [x14, #CONSOLE_T_NEXT]	/* Get pointer to next struct */
+	b	check_registered_loop
+console_not_registered:
+	mov	x0, #0
+	ret
+console_registered:
+	mov	x0, #1
+	ret
+endfunc console_is_registered
+
+	/* -----------------------------------------------
 	 * void console_switch_state(unsigned int new_state)
 	 * Function to switch the current console state.
 	 * The console state determines which of the
diff --git a/include/bl31/ea_handle.h b/include/bl31/ea_handle.h
new file mode 100644
index 0000000..060c9b7
--- /dev/null
+++ b/include/bl31/ea_handle.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __EA_HANDLE_H__
+#define __EA_HANDLE_H__
+
+/* Constants indicating the reason for an External Abort */
+
+/* External Abort received at SError vector */
+#define ERROR_EA_ASYNC		0
+
+/* Synchronous External Abort received at Synchronous exception vector */
+#define ERROR_EA_SYNC		1
+
+/* External Abort synchronized by ESB instruction */
+#define ERROR_EA_ESB		2
+
+/* RAS event signalled as peripheral interrupt */
+#define ERROR_INTERRUPT		3
+
+#endif /* __EA_HANDLE_H__ */
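
For illustration only, a hedged sketch of how a platform's diagnostics might map these reason codes to strings; the helper below is hypothetical and not part of this patch:

#include <ea_handle.h>

static const char *ea_reason_str(unsigned int reason)
{
	switch (reason) {
	case ERROR_EA_ASYNC:
		return "asynchronous External Abort (SError)";
	case ERROR_EA_SYNC:
		return "synchronous External Abort";
	case ERROR_EA_ESB:
		return "External Abort synchronized by ESB";
	case ERROR_INTERRUPT:
		return "RAS event signalled as interrupt";
	default:
		return "unknown";
	}
}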
diff --git a/include/common/aarch64/asm_macros.S b/include/common/aarch64/asm_macros.S
index 94a9df9..7c8e643 100644
--- a/include/common/aarch64/asm_macros.S
+++ b/include/common/aarch64/asm_macros.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -192,4 +192,10 @@
 	.space	SPINLOCK_ASM_SIZE
 	.endm
 
+#if RAS_EXTENSION
+	.macro esb
+	.inst	0xd503221f
+	.endm
+#endif
+
 #endif /* __ASM_MACROS_S__ */
diff --git a/include/drivers/console.h b/include/drivers/console.h
index f8ec83d..0855170 100644
--- a/include/drivers/console.h
+++ b/include/drivers/console.h
@@ -50,7 +50,12 @@
  */
 /* Remove a single console_t instance from the console list. */
 int console_unregister(console_t *console);
-/* Set scope mask of a console that determines in what states it is active. */
+/* Returns 1 if this console is already registered, 0 if not */
+int console_is_registered(console_t *console);
+/*
+ * Set scope mask of a console that determines in what states it is active.
+ * By default, consoles are registered with (CONSOLE_FLAG_BOOT|CONSOLE_FLAG_CRASH).
+ */
 void console_set_scope(console_t *console, unsigned int scope);
 
 /* Switch to a new global console state (CONSOLE_FLAG_BOOT/RUNTIME/CRASH). */
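
As a usage illustration (not part of this patch), the new query lets callers guard teardown of a console they may not have registered; 'boot_console' is a hypothetical console_t instance:

#include <console.h>

static console_t boot_console;

static void plat_teardown_boot_console(void)
{
	/* Only unregister the console if it is actually on the list */
	if (console_is_registered(&boot_console))
		(void)console_unregister(&boot_console);
}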
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index ff3881e..92bb97d 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -215,6 +215,7 @@
 
 /* SCR definitions */
 #define SCR_RES1_BITS		((U(1) << 4) | (U(1) << 5))
+#define SCR_FIEN_BIT		(U(1) << 21)
 #define SCR_TWE_BIT		(U(1) << 13)
 #define SCR_TWI_BIT		(U(1) << 12)
 #define SCR_ST_BIT		(U(1) << 11)
@@ -528,6 +529,12 @@
 #define EC_AARCH64_FP			U(0x2c)
 #define EC_SERROR			U(0x2f)
 
+/*
+ * External Abort bit in Instruction and Data Aborts synchronous exception
+ * syndromes.
+ */
+#define ESR_ISS_EABORT_EA_BIT		U(9)
+
 #define EC_BITS(x)			(((x) >> ESR_EC_SHIFT) & ESR_EC_MASK)
 
 /* Reset bit inside the Reset management register for EL3 (RMR_EL3) */
@@ -705,4 +712,23 @@
 #define AMCGCR_EL0_CG1NC_LENGTH	U(8)
 #define AMCGCR_EL0_CG1NC_MASK	U(0xff)
 
+/*******************************************************************************
+ * RAS system registers
+ *******************************************************************************/
+#define DISR_EL1		S3_0_C12_C1_1
+#define DISR_A_BIT		31
+
+#define ERRIDR_EL1		S3_0_C5_C3_0
+#define ERRIDR_MASK		0xffff
+
+#define ERRSELR_EL1		S3_0_C5_C3_1
+
+/* System register access to Standard Error Record registers */
+#define ERXFR_EL1		S3_0_C5_C4_0
+#define ERXCTLR_EL1		S3_0_C5_C4_1
+#define ERXSTATUS_EL1		S3_0_C5_C4_2
+#define ERXADDR_EL1		S3_0_C5_C4_3
+#define ERXMISC0_EL1		S3_0_C5_C4_4
+#define ERXMISC1_EL1		S3_0_C5_C4_5
+
 #endif /* __ARCH_H__ */
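
ESR_ISS_EABORT_EA_BIT mirrors the check made in the synchronous exception vector. A C rendition of that check, shown only to clarify the bit's meaning (the helper itself is hypothetical):

#include <arch.h>
#include <stdint.h>

/* Return 1 if ESR_EL3 describes a synchronous External Abort from a lower EL */
static int is_lower_el_external_abort(uint64_t esr_el3)
{
	unsigned int ec = EC_BITS(esr_el3);

	if ((ec != EC_IABORT_LOWER_EL) && (ec != EC_DABORT_LOWER_EL))
		return 0;

	/* The EA bit in the ISS distinguishes External Aborts from MMU faults */
	return (esr_el3 & (1ULL << ESR_ISS_EABORT_EA_BIT)) != 0ULL;
}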
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index c346f79..58ec943 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -333,6 +333,16 @@
 DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
 DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el2, ZCR_EL2)
 
+DEFINE_RENAME_SYSREG_READ_FUNC(erridr_el1, ERRIDR_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(errselr_el1, ERRSELR_EL1)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(erxfr_el1, ERXFR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxctlr_el1, ERXCTLR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxstatus_el1, ERXSTATUS_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxaddr_el1, ERXADDR_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc0_el1, ERXMISC0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc1_el1, ERXMISC1_EL1)
+
 #define IS_IN_EL(x) \
 	(GET_EL(read_CurrentEl()) == MODE_EL##x)
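
The accessors added above pair ERRSELR_EL1-based record selection with reads of the ERX* registers. A brief sketch of that idiom, assuming EL3 execution (the wrapper function is illustrative only; ras_arch.h below wraps the selection step as ser_sys_select_record()):

#include <arch_helpers.h>
#include <stdint.h>

/* Select error record 'idx', then read its status through ERXSTATUS_EL1 */
static uint64_t read_error_record_status(unsigned int idx)
{
	write_errselr_el1(idx);
	isb();
	return read_erxstatus_el1();
}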
 
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index 5f6bdc9..cdd74a3 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -7,6 +7,8 @@
 #ifndef __CONTEXT_H__
 #define __CONTEXT_H__
 
+#include <utils_def.h>
+
 /*******************************************************************************
  * Constants that allow assembler code to access members of and the 'gp_regs'
  * structure at their correct offsets.
@@ -53,10 +55,12 @@
  ******************************************************************************/
 #define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
 #define CTX_SCR_EL3		U(0x0)
-#define CTX_RUNTIME_SP		U(0x8)
-#define CTX_SPSR_EL3		U(0x10)
-#define CTX_ELR_EL3		U(0x18)
-#define CTX_EL3STATE_END	U(0x20)
+#define CTX_ESR_EL3		U(0x8)
+#define CTX_RUNTIME_SP		U(0x10)
+#define CTX_SPSR_EL3		U(0x18)
+#define CTX_ELR_EL3		U(0x20)
+#define CTX_UNUSED		U(0x28)
+#define CTX_EL3STATE_END	U(0x30)
 
 /*******************************************************************************
  * Constants that allow assembler code to access members of and the
diff --git a/include/lib/extensions/ras.h b/include/lib/extensions/ras.h
new file mode 100644
index 0000000..f57fc3a
--- /dev/null
+++ b/include/lib/extensions/ras.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __RAS_COMMON__
+#define __RAS_COMMON__
+
+#define ERR_HANDLER_VERSION	1
+
+/* Error record access mechanism */
+#define ERR_ACCESS_SYSREG	0
+#define ERR_ACCESS_MEMMAP	1
+
+/*
+ * Register all error records on the platform.
+ *
+ * This macro must be used in the same file as the array of error record info
+ * is declared. Only then would ARRAY_SIZE() yield a meaningful value.
+ */
+#define REGISTER_ERR_RECORD_INFO(_records) \
+	const struct err_record_mapping err_record_mapping = { \
+		.err_records = _records, \
+		.num_err_records = ARRAY_SIZE(_records), \
+	}
+
+/* Error record info iterator */
+#define for_each_err_record_info(_i, _info) \
+	for (_i = 0, _info = err_record_mapping.err_records; \
+		_i < err_record_mapping.num_err_records; \
+		_i++, _info++)
+
+#define _ERR_RECORD_COMMON(_probe, _handler, _aux) \
+	.probe = _probe, \
+	.handler = _handler, \
+	.aux_data = _aux,
+
+#define ERR_RECORD_SYSREG_V1(_idx_start, _num_idx, _probe, _handler, _aux) \
+	{ \
+		.version = 1, \
+		.sysreg.idx_start = _idx_start, \
+		.sysreg.num_idx = _num_idx, \
+		.access = ERR_ACCESS_SYSREG, \
+		_ERR_RECORD_COMMON(_probe, _handler, _aux) \
+	}
+
+#define ERR_RECORD_MEMMAP_V1(_base_addr, _size_num_k, _probe, _handler, _aux) \
+	{ \
+		.version = 1, \
+		.memmap.base_addr = _base_addr, \
+		.memmap.size_num_k = _size_num_k, \
+		.access = ERR_ACCESS_MEMMAP, \
+		_ERR_RECORD_COMMON(_probe, _handler, _aux) \
+	}
+
+/*
+ * Macro to be used to name and declare an array of RAS interrupts along with
+ * their handlers.
+ *
+ * This macro must be used in the same file as the array of interrupts is
+ * declared. Only then would ARRAY_SIZE() yield a meaningful value. Also, the
+ * array is expected to be sorted in increasing order of interrupt number.
+ */
+#define REGISTER_RAS_INTERRUPTS(_array) \
+	const struct ras_interrupt_mapping ras_interrupt_mapping = { \
+		.intrs = _array, \
+		.num_intrs = ARRAY_SIZE(_array), \
+	}
+
+#ifndef __ASSEMBLY__
+
+#include <assert.h>
+#include <ras_arch.h>
+
+struct err_record_info;
+
+struct ras_interrupt {
+	/* Interrupt number, and the associated error record info */
+	unsigned int intr_number;
+	struct err_record_info *err_record;
+	void *cookie;
+};
+
+/* Function to probe an error record group for errors */
+typedef int (*err_record_probe_t)(const struct err_record_info *info,
+		int *probe_data);
+
+/* Data passed to error record group handler */
+struct err_handler_data {
+	/* Info passed on from top-level exception handler */
+	uint64_t flags;
+	void *cookie;
+	void *handle;
+
+	/* Data structure version */
+	unsigned int version;
+
+	/* Reason for EA: one of the ERROR_* constants */
+	unsigned int ea_reason;
+
+	/*
+	 * For EAs received at vector, the value read from ESR; for an EA
+	 * synchronized by ESB, the value of DISR.
+	 */
+	uint32_t syndrome;
+
+	/* For errors signalled via interrupt, the raw interrupt ID; otherwise, 0. */
+	unsigned int interrupt;
+};
+
+/* Function to handle error from an error record group */
+typedef int (*err_record_handler_t)(const struct err_record_info *info,
+		int probe_data, const struct err_handler_data *const data);
+
+/* Error record information */
+struct err_record_info {
+	/* Function to probe error record group for errors */
+	err_record_probe_t probe;
+
+	/* Function to handle error record group errors */
+	err_record_handler_t handler;
+
+	/* Opaque group-specific data */
+	void *aux_data;
+
+	/* Additional information for Standard Error Records */
+	union {
+		struct {
+			/*
+			 * For a group accessed via memory-mapped registers,
+			 * base address of the page hosting error records, and
+			 * the size of the record group.
+			 */
+			uintptr_t base_addr;
+
+			/* Size of group in number of KBs */
+			unsigned int size_num_k;
+		} memmap;
+
+		struct {
+			/*
+			 * For error records accessed via System registers, the
+			 * starting index and the number of error records in the
+			 * group.
+			 */
+			unsigned int idx_start;
+			unsigned int num_idx;
+		} sysreg;
+	};
+
+	/* Data structure version */
+	unsigned int version;
+
+	/* Error record access mechanism */
+	unsigned int access:1;
+};
+
+struct err_record_mapping {
+	struct err_record_info *err_records;
+	size_t num_err_records;
+};
+
+struct ras_interrupt_mapping {
+	struct ras_interrupt *intrs;
+	size_t num_intrs;
+};
+
+extern const struct err_record_mapping err_record_mapping;
+extern const struct ras_interrupt_mapping ras_interrupt_mapping;
+
+
+/*
+ * Helper functions to probe memory-mapped and system registers implemented in
+ * Standard Error Record format
+ */
+static inline int ras_err_ser_probe_memmap(const struct err_record_info *info,
+		int *probe_data)
+{
+	assert(info->version == ERR_HANDLER_VERSION);
+
+	return ser_probe_memmap(info->memmap.base_addr, info->memmap.size_num_k,
+		probe_data);
+}
+
+static inline int ras_err_ser_probe_sysreg(const struct err_record_info *info,
+		int *probe_data)
+{
+	assert(info->version == ERR_HANDLER_VERSION);
+
+	return ser_probe_sysreg(info->sysreg.idx_start, info->sysreg.num_idx,
+			probe_data);
+}
+
+int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
+		void *handle, uint64_t flags);
+void ras_init(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __RAS_COMMON__ */
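
To make the registration macros concrete, a hedged sketch of how a platform might declare its error records; the record counts, base address and handler below are hypothetical and only illustrate the expected shape:

#include <ras.h>

static int soc_err_handler(const struct err_record_info *info, int probe_data,
		const struct err_handler_data *const data)
{
	/* Inspect and clear the record indicated by probe_data, then return 0 */
	return 0;
}

struct err_record_info soc_err_records[] = {
	/* 16 records accessed through System Registers, starting at index 0 */
	ERR_RECORD_SYSREG_V1(0, 16, ras_err_ser_probe_sysreg,
			soc_err_handler, NULL),

	/* One 4KB memory-mapped Standard Error Record node (made-up address) */
	ERR_RECORD_MEMMAP_V1(0x2a400000, 4, ras_err_ser_probe_memmap,
			soc_err_handler, NULL),
};

REGISTER_ERR_RECORD_INFO(soc_err_records);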
diff --git a/include/lib/extensions/ras_arch.h b/include/lib/extensions/ras_arch.h
new file mode 100644
index 0000000..7d21053
--- /dev/null
+++ b/include/lib/extensions/ras_arch.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __RAS_H__
+#define __RAS_H__
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <context.h>
+#include <mmio.h>
+#include <stdint.h>
+
+/*
+ * Size of nodes implementing Standard Error Records - currently only 4k is
+ * supported.
+ */
+#define STD_ERR_NODE_SIZE_NUM_K		4
+
+/*
+ * Individual register offsets within an error record in Standard Error Record
+ * format when error records are accessed through memory-mapped registers.
+ */
+#define ERR_FR(n)	(0x0 + (64 * (n)))
+#define ERR_CTLR(n)	(0x8 + (64 * (n)))
+#define ERR_STATUS(n)	(0x10 + (64 * (n)))
+#define ERR_ADDR(n)	(0x18 + (64 * (n)))
+#define ERR_MISC0(n)	(0x20 + (64 * (n)))
+#define ERR_MISC1(n)	(0x28 + (64 * (n)))
+
+/* Group Status Register (ERRGSR) offset */
+#define ERR_GSR(base, size_num_k, n) \
+	((base) + (0x380 * (size_num_k)) + (8 * (n)))
+
+/* Management register offsets */
+#define ERR_DEVID(base, size_num_k) \
+	((base) + ((0x400 * (size_num_k)) - 0x100) + 0xc8)
+
+#define ERR_DEVID_MASK	0xffff
+
+/* Standard Error Record status register fields */
+#define ERR_STATUS_AV_SHIFT	31
+#define ERR_STATUS_AV_MASK	U(0x1)
+
+#define ERR_STATUS_V_SHIFT	30
+#define ERR_STATUS_V_MASK	U(0x1)
+
+#define ERR_STATUS_UE_SHIFT	29
+#define ERR_STATUS_UE_MASK	U(0x1)
+
+#define ERR_STATUS_ER_SHIFT	28
+#define ERR_STATUS_ER_MASK	U(0x1)
+
+#define ERR_STATUS_OF_SHIFT	27
+#define ERR_STATUS_OF_MASK	U(0x1)
+
+#define ERR_STATUS_MV_SHIFT	26
+#define ERR_STATUS_MV_MASK	U(0x1)
+
+#define ERR_STATUS_CE_SHIFT	24
+#define ERR_STATUS_CE_MASK	U(0x3)
+
+#define ERR_STATUS_DE_SHIFT	23
+#define ERR_STATUS_DE_MASK	U(0x1)
+
+#define ERR_STATUS_PN_SHIFT	22
+#define ERR_STATUS_PN_MASK	U(0x1)
+
+#define ERR_STATUS_UET_SHIFT	20
+#define ERR_STATUS_UET_MASK	U(0x3)
+
+#define ERR_STATUS_IERR_SHIFT	8
+#define ERR_STATUS_IERR_MASK	U(0xff)
+
+#define ERR_STATUS_SERR_SHIFT	0
+#define ERR_STATUS_SERR_MASK	U(0xff)
+
+#define ERR_STATUS_GET_FIELD(_status, _field) \
+	(((_status) >> ERR_STATUS_ ##_field ##_SHIFT) & ERR_STATUS_ ##_field ##_MASK)
+
+#define ERR_STATUS_CLR_FIELD(_status, _field) \
+	(_status) &= ~(ERR_STATUS_ ##_field ##_MASK << ERR_STATUS_ ##_field ##_SHIFT)
+
+#define ERR_STATUS_SET_FIELD(_status, _field, _value) \
+	(_status) |= (((_value) & ERR_STATUS_ ##_field ##_MASK) << ERR_STATUS_ ##_field ##_SHIFT)
+
+#define ERR_STATUS_WRITE_FIELD(_status, _field, _value) do { \
+		ERR_STATUS_CLR_FIELD(_status, _field); \
+		ERR_STATUS_SET_FIELD(_status, _field, _value); \
+	} while (0)
+
+
+/* Standard Error Record control register fields */
+#define ERR_CTLR_WDUI_SHIFT	11
+#define ERR_CTLR_WDUI_MASK	0x1
+
+#define ERR_CTLR_RDUI_SHIFT	10
+#define ERR_CTLR_RDUI_MASK	0x1
+#define ERR_CTLR_DUI_SHIFT	ERR_CTLR_RDUI_SHIFT
+#define ERR_CTLR_DUI_MASK	ERR_CTLR_RDUI_MASK
+
+#define ERR_CTLR_WCFI_SHIFT	9
+#define ERR_CTLR_WCFI_MASK	0x1
+
+#define ERR_CTLR_RCFI_SHIFT	8
+#define ERR_CTLR_RCFI_MASK	0x1
+#define ERR_CTLR_CFI_SHIFT	ERR_CTLR_RCFI_SHIFT
+#define ERR_CTLR_CFI_MASK	ERR_CTLR_RCFI_MASK
+
+#define ERR_CTLR_WUE_SHIFT	7
+#define ERR_CTLR_WUE_MASK	0x1
+
+#define ERR_CTLR_WFI_SHIFT	6
+#define ERR_CTLR_WFI_MASK	0x1
+
+#define ERR_CTLR_WUI_SHIFT	5
+#define ERR_CTLR_WUI_MASK	0x1
+
+#define ERR_CTLR_RUE_SHIFT	4
+#define ERR_CTLR_RUE_MASK	0x1
+#define ERR_CTLR_UE_SHIFT	ERR_CTLR_RUE_SHIFT
+#define ERR_CTLR_UE_MASK	ERR_CTLR_RUE_MASK
+
+#define ERR_CTLR_RFI_SHIFT	3
+#define ERR_CTLR_RFI_MASK	0x1
+#define ERR_CTLR_FI_SHIFT	ERR_CTLR_RFI_SHIFT
+#define ERR_CTLR_FI_MASK	ERR_CTLR_RFI_MASK
+
+#define ERR_CTLR_RUI_SHIFT	2
+#define ERR_CTLR_RUI_MASK	0x1
+#define ERR_CTLR_UI_SHIFT	ERR_CTLR_RUI_SHIFT
+#define ERR_CTLR_UI_MASK	ERR_CTLR_RUI_MASK
+
+#define ERR_CTLR_ED_SHIFT	0
+#define ERR_CTLR_ED_MASK	0x1
+
+#define ERR_CTLR_CLR_FIELD(_ctlr, _field) \
+	(_ctlr) &= ~(ERR_CTLR_ ##_field ##_MASK << ERR_CTLR_ ##_field ##_SHIFT)
+
+#define ERR_CTLR_SET_FIELD(_ctlr, _field, _value) \
+	(_ctlr) |= (((_value) & ERR_CTLR_ ##_field ##_MASK) << ERR_CTLR_ ##_field ##_SHIFT)
+
+#define ERR_CTLR_ENABLE_FIELD(_ctlr, _field) \
+	ERR_CTLR_SET_FIELD(_ctlr, _field, ERR_CTLR_ ##_field ##_MASK)
+
+/* Uncorrected error types */
+#define ERROR_STATUS_UET_UC	0x0	/* Uncontainable */
+#define ERROR_STATUS_UET_UEU	0x1	/* Unrecoverable */
+#define ERROR_STATUS_UET_UEO	0x2	/* Restartable */
+#define ERROR_STATUS_UET_UER	0x3	/* Recoverable */
+
+
+/*
+ * Standard Error Record accessors for memory-mapped registers.
+ */
+
+static inline uint64_t ser_get_feature(uintptr_t base, unsigned int idx)
+{
+	return mmio_read_64(base + ERR_FR(idx));
+}
+
+static inline uint64_t ser_get_control(uintptr_t base, unsigned int idx)
+{
+	return mmio_read_64(base + ERR_CTLR(idx));
+}
+
+static inline uint64_t ser_get_status(uintptr_t base, unsigned int idx)
+{
+	return mmio_read_64(base + ERR_STATUS(idx));
+}
+
+/*
+ * The error handling agent writes to the status register to clear an
+ * identified/handled error. Most fields in the status register are
+ * conditional write-one-to-clear.
+ *
+ * Typically, to clear the status, it suffices to write back the same value
+ * previously read. However, if new, higher-priority errors were recorded on
+ * the node since the status was last read, writing back the read value won't
+ * clear the status. Therefore, an error handling agent must read back and
+ * verify that the status has indeed been cleared.
+ */
+static inline void ser_set_status(uintptr_t base, unsigned int idx,
+		uint64_t status)
+{
+	mmio_write_64(base + ERR_STATUS(idx), status);
+}
+
+static inline uint64_t ser_get_addr(uintptr_t base, unsigned int idx)
+{
+	return mmio_read_64(base + ERR_ADDR(idx));
+}
+
+static inline uint64_t ser_get_misc0(uintptr_t base, unsigned int idx)
+{
+	return mmio_read_64(base + ERR_MISC0(idx));
+}
+
+static inline uint64_t ser_get_misc1(uintptr_t base, unsigned int idx)
+{
+	return mmio_read_64(base + ERR_MISC1(idx));
+}
+
+
+/*
+ * Standard Error Record helpers for System registers.
+ */
+static inline void ser_sys_select_record(unsigned int idx)
+{
+	unsigned int max_idx __unused = read_erridr_el1() & ERRIDR_MASK;
+
+	assert(idx < max_idx);
+
+	write_errselr_el1(idx);
+	isb();
+}
+
+/* Library functions to probe Standard Error Record */
+int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data);
+int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_data);
+
+#endif /* __RAS_H__ */
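
A hedged sketch of how the accessors and status-field helpers combine in an error handler; 'base' and 'idx' would come from the platform's err_record_info, and the read-back follows the note on ser_set_status():

#include <debug.h>
#include <ras_arch.h>

static void clear_std_err_record(uintptr_t base, unsigned int idx)
{
	uint64_t status = ser_get_status(base, idx);

	/* Nothing to do if the record holds no valid error */
	if (ERR_STATUS_GET_FIELD(status, V) == 0U)
		return;

	if (ERR_STATUS_GET_FIELD(status, AV) != 0U)
		INFO("Error address: 0x%llx\n",
				(unsigned long long)ser_get_addr(base, idx));

	/* Most status fields are write-one-to-clear: write back the read value */
	ser_set_status(base, idx, status);

	/* Verify the clear took effect, in case a new error was recorded since */
	assert(ERR_STATUS_GET_FIELD(ser_get_status(base, idx), V) == 0U);
}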
diff --git a/include/lib/utils_def.h b/include/lib/utils_def.h
index 8abc73c..31b1294 100644
--- a/include/lib/utils_def.h
+++ b/include/lib/utils_def.h
@@ -68,6 +68,13 @@
 	(((ptr) > UINTPTR_MAX - (inc)) ? 1 : 0)
 
 /*
+ * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
+ * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
+ */
+#define check_u32_overflow(u32, inc) \
+	((u32) > (UINT32_MAX - (inc)) ? 1 : 0)
+
+/*
  * For those constants to be shared between C and other sources, apply a 'u'
  * or 'ull' suffix to the argument only in C, to avoid undefined or unintended
  * behaviour.
diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h
index 95e986b..d10afae 100644
--- a/include/plat/arm/common/arm_def.h
+++ b/include/plat/arm/common/arm_def.h
@@ -483,6 +483,7 @@
 #define PLAT_PERCPU_BAKERY_LOCK_SIZE		(1 * CACHE_WRITEBACK_GRANULE)
 
 /* Priority levels for ARM platforms */
+#define PLAT_RAS_PRI			0x10
 #define PLAT_SDEI_CRITICAL_PRI		0x60
 #define PLAT_SDEI_NORMAL_PRI		0x70
 
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
index aa181c8..cd17a00 100644
--- a/include/plat/common/platform.h
+++ b/include/plat/common/platform.h
@@ -124,6 +124,9 @@
 void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr);
 #endif
 
+void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
+		void *handle, uint64_t flags);
+
 /*
  * The following function is mandatory when the
  * firmware update feature is used.
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 620ec16..121ca4d 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,8 +15,8 @@
 	.global	fpregs_context_restore
 #endif
 	.global	save_gp_registers
+	.global	restore_gp_registers
 	.global	restore_gp_registers_eret
-	.global	restore_gp_registers_callee_eret
 	.global	el3_exit
 
 /* -----------------------------------------------------
@@ -332,30 +332,50 @@
 	ret
 endfunc save_gp_registers
 
-func restore_gp_registers_eret
+/*
+ * This function restores all general purpose registers except x30 from the
+ * CPU context. x30 register must be explicitly restored by the caller.
+ */
+func restore_gp_registers
 	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-	b	restore_gp_registers_callee_eret
-endfunc restore_gp_registers_eret
-
-func restore_gp_registers_callee_eret
 	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
 	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
 	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
 	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
 	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
 	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
 	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
 	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
 	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
 	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
 	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
+	msr	sp_el0, x28
 	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-	ldp	x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-	msr	sp_el0, x17
-	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	ret
+endfunc restore_gp_registers
+
+/*
+ * Restore general purpose registers (including x30), and exit EL3 via ERET to
+ * a lower exception level.
+ */
+func restore_gp_registers_eret
+	bl	restore_gp_registers
+	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+#if IMAGE_BL31 && RAS_EXTENSION
+	/*
+	 * Issue Error Synchronization Barrier to synchronize SErrors before
+	 * exiting EL3. We're running with EAs unmasked, so any synchronized
+	 * errors would be taken immediately; therefore there is no need to
+	 * inspect the DISR_EL1 register.
+	 */
+	esb
+#endif
 	eret
-endfunc	restore_gp_registers_callee_eret
+endfunc	restore_gp_registers_eret
 
 	/* -----------------------------------------------------
 	 * This routine assumes that the SP_EL3 is pointing to
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 2608d1f..76eecc1 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -114,6 +114,11 @@
 	scr_el3 &= ~SCR_EA_BIT;
 #endif
 
+#if FAULT_INJECTION_SUPPORT
+	/* Enable fault injection from lower ELs */
+	scr_el3 |= SCR_FIEN_BIT;
+#endif
+
 #ifdef IMAGE_BL31
 	/*
 	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ rounting as
diff --git a/lib/extensions/ras/ras_common.c b/lib/extensions/ras/ras_common.c
new file mode 100644
index 0000000..0335a7b
--- /dev/null
+++ b/lib/extensions/ras/ras_common.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <ea_handle.h>
+#include <ehf.h>
+#include <platform.h>
+#include <ras.h>
+#include <ras_arch.h>
+
+#ifndef PLAT_RAS_PRI
+# error Platform must define RAS priority value
+#endif
+
+/* Handler that receives External Aborts on RAS-capable systems */
+int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
+		void *handle, uint64_t flags)
+{
+	unsigned int i, n_handled = 0, ret;
+	int probe_data;
+	struct err_record_info *info;
+
+	const struct err_handler_data err_data = {
+		.version = ERR_HANDLER_VERSION,
+		.ea_reason = ea_reason,
+		.interrupt = 0,
+		.syndrome = syndrome,
+		.flags = flags,
+		.cookie = cookie,
+		.handle = handle
+	};
+
+	for_each_err_record_info(i, info) {
+		assert(info->probe != NULL);
+		assert(info->handler != NULL);
+
+		/* Continue probing until the record group signals no error */
+		while (1) {
+			if (info->probe(info, &probe_data) == 0)
+				break;
+
+			/* Handle error */
+			ret = info->handler(info, probe_data, &err_data);
+			if (ret != 0)
+				return ret;
+
+			n_handled++;
+		}
+	}
+
+	return (n_handled != 0);
+}
+
+#if ENABLE_ASSERTIONS
+static void assert_interrupts_sorted(void)
+{
+	unsigned int i, last;
+	struct ras_interrupt *start = ras_interrupt_mapping.intrs;
+
+	if (ras_interrupt_mapping.num_intrs == 0)
+		return;
+
+	last = start[0].intr_number;
+	for (i = 1; i < ras_interrupt_mapping.num_intrs; i++) {
+		assert(start[i].intr_number > last);
+		last = start[i].intr_number;
+	}
+}
+#endif
+
+/*
+ * Given an RAS interrupt number, locate the registered handler and call it. If
+ * no handler was found for the interrupt number, this function panics.
+ */
+static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
+		void *handle, void *cookie)
+{
+	struct ras_interrupt *ras_inrs = ras_interrupt_mapping.intrs;
+	struct ras_interrupt *selected = NULL;
+	int start, end, mid, probe_data, ret __unused;
+
+	const struct err_handler_data err_data = {
+		.version = ERR_HANDLER_VERSION,
+		.interrupt = intr_raw,
+		.flags = flags,
+		.cookie = cookie,
+		.handle = handle
+	};
+
+	assert(ras_interrupt_mapping.num_intrs > 0);
+
+	start = 0;
+	end = ras_interrupt_mapping.num_intrs - 1;
+	while (start <= end) {
+		mid = ((end + start) / 2);
+		if (intr_raw == ras_inrs[mid].intr_number) {
+			selected = &ras_inrs[mid];
+			break;
+		} else if (intr_raw < ras_inrs[mid].intr_number) {
+			/* Move left */
+			end = mid - 1;
+		} else {
+			/* Move right */
+			start = mid + 1;
+		}
+	}
+
+	if (selected == NULL) {
+		ERROR("RAS interrupt %u has no handler!\n", intr_raw);
+		panic();
+	}
+
+
+	ret = selected->err_record->probe(selected->err_record, &probe_data);
+	assert(ret != 0);
+
+	/* Call error handler for the record group */
+	assert(selected->err_record->handler != NULL);
+	selected->err_record->handler(selected->err_record, probe_data,
+			&err_data);
+
+	return 0;
+}
+
+void ras_init(void)
+{
+#if ENABLE_ASSERTIONS
+	/* Check RAS interrupts are sorted */
+	assert_interrupts_sorted();
+#endif
+
+	/* Register RAS priority handler */
+	ehf_register_priority_handler(PLAT_RAS_PRI, ras_interrupt_handler);
+}
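
For orientation, a hedged sketch of an error record group handler as dispatched by the two paths above (External Abort and interrupt); the logging is illustrative only:

#include <debug.h>
#include <ras.h>

static int soc_record_handler(const struct err_record_info *info,
		int probe_data, const struct err_handler_data *const data)
{
	assert(data->version == ERR_HANDLER_VERSION);

	if (data->interrupt != 0U)
		INFO("RAS error signalled by interrupt %u\n", data->interrupt);
	else
		INFO("RAS EA: reason %u, syndrome 0x%x\n", data->ea_reason,
				data->syndrome);

	/* probe_data is the index of the record in error within this group */
	INFO("Record in error: %d\n", probe_data);

	return 0;
}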
diff --git a/lib/extensions/ras/std_err_record.c b/lib/extensions/ras/std_err_record.c
new file mode 100644
index 0000000..65c007f
--- /dev/null
+++ b/lib/extensions/ras/std_err_record.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <ras_arch.h>
+#include <utils_def.h>
+
+/*
+ * Probe for error in memory-mapped registers containing error records
+ * implemented in the Standard Error Record format. Upon detecting an error,
+ * set probe data to the index of the record in error, and return 1;
+ * otherwise, return 0.
+ */
+int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data)
+{
+	int num_records, num_group_regs, i;
+	uint64_t gsr;
+
+	assert(base != 0);
+
+	/* Only 4K supported for now */
+	assert(size_num_k == STD_ERR_NODE_SIZE_NUM_K);
+
+	num_records = (mmio_read_32(ERR_DEVID(base, size_num_k)) & ERR_DEVID_MASK);
+
+	/* A group register shows error status for 2^6 error records */
+	num_group_regs = (num_records >> 6) + 1;
+
+	/* Iterate through group registers to find a record in error */
+	for (i = 0; i < num_group_regs; i++) {
+		gsr = mmio_read_64(ERR_GSR(base, size_num_k, i));
+		if (gsr == 0)
+			continue;
+
+		/* Return the index of the record in error */
+		if (probe_data != NULL)
+			*probe_data = ((i << 6) + __builtin_ctz(gsr));
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Probe for error in System Registers where error records are implemented in
+ * Standard Error Record format. Upon detecting an error, set probe data to the
+ * index of the record in error, and return 1; otherwise, return 0.
+ */
+int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_data)
+{
+	int i;
+	uint64_t status;
+	unsigned int max_idx __unused = read_erridr_el1() & ERRIDR_MASK;
+
+	assert(idx_start < max_idx);
+	assert(check_u32_overflow(idx_start, num_idx) == 0);
+	assert((idx_start + num_idx - 1) < max_idx);
+
+	for (i = 0; i < num_idx; i++) {
+		/* Select the error record */
+		ser_sys_select_record(idx_start + i);
+
+		/* Retrieve status register from the error record */
+		status = read_erxstatus_el1();
+
+		/* Check for valid field in status */
+		if (ERR_STATUS_GET_FIELD(status, V)) {
+			if (probe_data != NULL)
+				*probe_data = i;
+			return 1;
+		}
+	}
+
+	return 0;
+}
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 9e95cd5..4bbff03 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -76,6 +76,9 @@
 # Build flag to treat usage of deprecated platform and framework APIs as error.
 ERROR_DEPRECATED		:= 0
 
+# Fault injection support
+FAULT_INJECTION_SUPPORT		:= 0
+
 # Byte alignment that each component in FIP is aligned to
 FIP_ALIGN			:= 0
 
@@ -92,6 +95,10 @@
 # default, they are for Secure EL1.
 GICV2_G0_FOR_EL3		:= 0
 
+# Route External Aborts to EL3. Disabled by default; External Aborts are handled
+# by lower ELs.
+HANDLE_EA_EL3_FIRST		:= 0
+
 # Whether system coherency is managed in hardware, without explicit software
 # operations.
 HW_ASSISTED_COHERENCY		:= 0
@@ -120,6 +127,9 @@
 # Original format.
 PSCI_EXTENDED_STATE_ID		:= 0
 
+# Enable RAS support
+RAS_EXTENSION			:= 0
+
 # By default, BL1 acts as the reset handler, not BL31
 RESET_TO_BL31			:= 0
 
diff --git a/plat/arm/common/aarch64/arm_ehf.c b/plat/arm/common/aarch64/arm_ehf.c
index 785b7bb..665871b 100644
--- a/plat/arm/common/aarch64/arm_ehf.c
+++ b/plat/arm/common/aarch64/arm_ehf.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,6 +11,11 @@
  * Enumeration of priority levels on ARM platforms.
  */
 ehf_pri_desc_t arm_exceptions[] = {
+#if RAS_EXTENSION
+	/* RAS Priority */
+	EHF_PRI_DESC(ARM_PRI_BITS, PLAT_RAS_PRI),
+#endif
+
 #if SDEI_SUPPORT
 	/* Critical priority SDEI */
 	EHF_PRI_DESC(ARM_PRI_BITS, PLAT_SDEI_CRITICAL_PRI),
diff --git a/plat/arm/common/aarch64/arm_ras.c b/plat/arm/common/aarch64/arm_ras.c
new file mode 100644
index 0000000..80dfaf1
--- /dev/null
+++ b/plat/arm/common/aarch64/arm_ras.c
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <ras.h>
+
+struct ras_interrupt arm_ras_interrupts[] = {
+};
+
+struct err_record_info arm_err_records[] = {
+};
+
+REGISTER_ERR_RECORD_INFO(arm_err_records);
+REGISTER_RAS_INTERRUPTS(arm_ras_interrupts);
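
The Arm platform arrays above are deliberately left empty. A hedged sketch of what a populated mapping could look like (interrupt numbers and record references are hypothetical; the array must remain sorted by interrupt number, as ras_init() asserts):

#include <ras.h>

extern struct err_record_info soc_err_records[];

struct ras_interrupt soc_ras_interrupts[] = {
	{
		.intr_number = 35,
		.err_record = &soc_err_records[0],
	},
	{
		.intr_number = 36,
		.err_record = &soc_err_records[1],
	},
};

REGISTER_RAS_INTERRUPTS(soc_ras_interrupts);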
diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c
index 3c70f9d..b483f0c 100644
--- a/plat/arm/common/arm_bl31_setup.c
+++ b/plat/arm/common/arm_bl31_setup.c
@@ -14,6 +14,7 @@
 #include <mmio.h>
 #include <plat_arm.h>
 #include <platform.h>
+#include <ras.h>
 
 #define BL31_END (uintptr_t)(&__BL31_END__)
 
@@ -221,6 +222,10 @@
 
 	/* Initialize power controller before setting up topology */
 	plat_arm_pwrc_setup();
+
+#if RAS_EXTENSION
+	ras_init();
+#endif
 }
 
 /*******************************************************************************
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
index 015e454..2a78730 100644
--- a/plat/arm/common/arm_common.mk
+++ b/plat/arm/common/arm_common.mk
@@ -203,6 +203,13 @@
 BL31_SOURCES		+=	plat/arm/common/aarch64/arm_sdei.c
 endif
 
+# RAS sources
+ifeq (${RAS_EXTENSION},1)
+BL31_SOURCES		+=	lib/extensions/ras/std_err_record.c		\
+				lib/extensions/ras/ras_common.c \
+				plat/arm/common/aarch64/arm_ras.c
+endif
+
 ifneq (${TRUSTED_BOARD_BOOT},0)
 
     # Include common TBB sources
diff --git a/plat/common/aarch64/plat_common.c b/plat/common/aarch64/plat_common.c
index 7a2f38c..409ae55 100644
--- a/plat/common/aarch64/plat_common.c
+++ b/plat/common/aarch64/plat_common.c
@@ -8,6 +8,9 @@
 #include <assert.h>
 #include <console.h>
 #include <platform.h>
+#if RAS_EXTENSION
+#include <ras.h>
+#endif
 #include <xlat_mmu_helpers.h>
 
 /*
@@ -28,6 +31,8 @@
 #pragma weak plat_sdei_validate_entry_point
 #endif
 
+#pragma weak plat_ea_handler
+
 void bl31_plat_enable_mmu(uint32_t flags)
 {
 	enable_mmu_el3(flags);
@@ -105,3 +110,20 @@
 	return 0;
 }
 #endif
+
+/* RAS functions common to AArch64 ARM platforms */
+void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
+		void *handle, uint64_t flags)
+{
+#if RAS_EXTENSION
+	/* Call RAS EA handler */
+	int handled = ras_ea_handler(ea_reason, syndrome, cookie, handle, flags);
+	if (handled != 0)
+		return;
+#endif
+
+	ERROR("Unhandled External Abort received on 0x%lx at EL3!\n",
+			read_mpidr_el1());
+	ERROR(" exception reason=%u syndrome=0x%llx\n", ea_reason, syndrome);
+	panic();
+}
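
Because plat_ea_handler is declared weak, a platform may override it. A hedged sketch of such an override, assuming RAS_EXTENSION=1, that still defers to the common RAS handler first (the fallback reporting is hypothetical):

#include <debug.h>
#include <platform.h>
#include <ras.h>

void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
		void *handle, uint64_t flags)
{
	/* Give the RAS framework the first chance to handle the error */
	if (ras_ea_handler(ea_reason, syndrome, cookie, handle, flags) != 0)
		return;

	/* Platform-specific reporting before giving up */
	ERROR("Unhandled External Abort: reason=%u syndrome=0x%llx\n",
			ea_reason, syndrome);
	panic();
}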