fix(el3-runtime): restrict lower el EA handlers in FFH mode

This patch makes the following changes to restrict the handling of lower
EL EAs to configurations where FFH mode is enabled (a sketch of the
resulting routing decision follows the list):

 - Compile ea_delegate.S only if FFH mode is enabled.
 - For Sync exceptions from lower ELs, if the EC was not SMC or a SYS
   register trap, the exception was assumed to be an EA, which is not
   correct. Move the known Sync exceptions (EL3 IMPDEF) out of the sync
   EA handler.
 - Report unhandled exceptions for SErrors from lower ELs in KFH mode,
   as these are unexpected.
 - Move code used for KFH mode out of ea_delegate.S.

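For illustration, a minimal C sketch of the routing decision this patch
enforces (not TF-A code; route_lower_el_ea is a hypothetical name), assuming
FFH corresponds to SCR_EL3.EA = 1 and KFH to SCR_EL3.EA = 0:

    #include <stdint.h>

    #define SCR_EA_BIT	(UINT64_C(1) << 3)	/* SCR_EL3.EA */

    typedef enum {
    	ROUTE_FFH_EL3,		/* Firmware First: EL3 handles the EA */
    	ROUTE_UNHANDLED,	/* KFH or no FFH support: report unhandled */
    } ea_route_t;

    /* Hypothetical helper mirroring the new vector-entry logic. */
    static ea_route_t route_lower_el_ea(uint64_t scr_el3, int ffh_support)
    {
    	if ((ffh_support != 0) && ((scr_el3 & SCR_EA_BIT) != 0U))
    		return ROUTE_FFH_EL3;
    	return ROUTE_UNHANDLED;	/* lower EL owns EA handling */
    }
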
Signed-off-by: Manish Pandey <manish.pandey2@arm.com>
Change-Id: I577089677d0ec8cde7c20952172bee955573d2ed
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 3655467..ed48311 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -13,6 +13,7 @@
 #include <bl31/sync_handle.h>
 #include <common/runtime_svc.h>
 #include <context.h>
+#include <cpu_macros.S>
 #include <el3_common_macros.S>
 #include <lib/el3_runtime/cpu_data.h>
 #include <lib/smccc.h>
@@ -105,9 +106,19 @@
 	cmp	x30, #EC_AARCH64_SYS
 	b.eq	sync_handler64
 
-	/* Synchronous exceptions other than the above are assumed to be EA */
-	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	cmp	x30, #EC_IMP_DEF_EL3
+	b.eq	imp_def_el3_handler
+
+	/* If FFH support is enabled, try to handle lower EL EA exceptions. */
+#if FFH_SUPPORT
+	mrs	x30, scr_el3
+	tst	x30, #SCR_EA_BIT
+	b.eq	1f
 	b	handle_lower_el_sync_ea
+#endif
+1:
+	/* Synchronous exceptions other than the above are unhandled */
+	b	report_unhandled_exception
 	.endm
 
 vector_base runtime_exceptions
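
As a reading aid, the dispatch this macro now implements can be sketched in C
(illustrative only; the function names stand in for the assembly labels, and
the EC values are the architectural encodings used by the TF-A headers):

    #include <stdint.h>

    #define EC_AARCH32_SMC	0x13U
    #define EC_AARCH64_SMC	0x17U
    #define EC_AARCH64_SYS	0x18U
    #define EC_IMP_DEF_EL3	0x1fU
    #define SCR_EA_BIT		(UINT64_C(1) << 3)

    void sync_handler64(void);			/* SMC/SYS-reg trap path */
    void imp_def_el3_handler(void);		/* CPU-specific IMPDEF EC */
    void handle_lower_el_sync_ea(void);		/* FFH: treat as sync EA */
    void report_unhandled_exception(void);

    static void sync_dispatch(uint64_t ec, uint64_t scr_el3)
    {
    	switch (ec) {
    	case EC_AARCH32_SMC:
    	case EC_AARCH64_SMC:
    	case EC_AARCH64_SYS:
    		sync_handler64();
    		break;
    	case EC_IMP_DEF_EL3:
    		imp_def_el3_handler();
    		break;
    	default:
    #if FFH_SUPPORT
    		/* FFH: only route to the EA handler if SCR_EL3.EA is set. */
    		if ((scr_el3 & SCR_EA_BIT) != 0U) {
    			handle_lower_el_sync_ea();
    			break;
    		}
    #endif
    		report_unhandled_exception();
    		break;
    	}
    }
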
@@ -243,11 +254,15 @@
 	 * So reuse the sync mechanism to catch any further errors which are pending.
 	 */
 vector_entry serror_aarch64
+#if FFH_SUPPORT
 	save_x30
 	apply_at_speculative_wa
 	sync_and_handle_pending_serror
 	unmask_async_ea
 	b	handle_lower_el_async_ea
+#else
+	b	report_unhandled_exception
+#endif
 end_vector_entry serror_aarch64
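
For both the AArch64 entry above and the AArch32 entry below, the policy can
be summarized with a small C sketch (illustrative only; the helper names are
hypothetical stand-ins for the assembly macros and labels):

    /* SError from a lower EL: FFH consumes it at EL3; KFH should never
     * route it here, so anything that arrives is reported as unhandled. */
    void synchronize_pending_errors(void);	/* error synchronization */
    void handle_lower_el_async_ea(void);
    void report_unhandled_exception(void);

    static void serror_vector_entry(void)
    {
    #if FFH_SUPPORT
    	synchronize_pending_errors();	/* catch further pending errors */
    	handle_lower_el_async_ea();	/* FFH: EL3 handles the SError */
    #else
    	report_unhandled_exception();	/* KFH: unexpected at EL3 */
    #endif
    }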
 
 	/* ---------------------------------------------------------------------
@@ -289,11 +304,15 @@
 	 * So reuse the sync mechanism to catch any further errors which are pending.
 	 */
 vector_entry serror_aarch32
+#if FFH_SUPPORT
 	save_x30
 	apply_at_speculative_wa
 	sync_and_handle_pending_serror
 	unmask_async_ea
 	b	handle_lower_el_async_ea
+#else
+	b	report_unhandled_exception
+#endif
 end_vector_entry serror_aarch32
 
 #ifdef MONITOR_TRAPS
@@ -583,6 +602,114 @@
 	b	el3_exit
 endfunc handle_interrupt_exception
 
+func imp_def_el3_handler
+	/* Save GP registers */
+	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+
+	/* Get the cpu_ops pointer */
+	bl	get_cpu_ops_ptr
+
+	/* Get the cpu_ops exception handler */
+	ldr	x0, [x0, #CPU_E_HANDLER_FUNC]
+
+	/*
+	 * If the retrieved function pointer is NULL, this CPU does not have an
+	 * implementation-defined exception handler function.
+	 */
+	cbz	x0, el3_handler_exit
+	mrs	x1, esr_el3
+	ubfx	x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+	blr	x0
+el3_handler_exit:
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	restore_x30
+	no_ret	report_unhandled_exception
+endfunc imp_def_el3_handler
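
The NULL-pointer check above can be pictured with a short C sketch (not the
actual cpu_ops definitions; the struct view and handler signature shown here
are assumptions for illustration only):

    #include <stdint.h>
    #include <stddef.h>

    /* Assumed shape; the assembly passes the EC in x1 before blr. */
    typedef void (*cpu_eh_fn_t)(uint64_t ec);

    struct cpu_ops_view {		/* hypothetical partial view */
    	cpu_eh_fn_t e_handler_func;	/* CPU_E_HANDLER_FUNC slot */
    };

    void report_unhandled_exception(void);

    static void dispatch_imp_def_el3(struct cpu_ops_view *ops,
    				     uint64_t esr_el3)
    {
    	uint64_t ec = (esr_el3 >> 26) & 0x3fU;	/* ESR_EL3.EC[31:26] */

    	if (ops->e_handler_func != NULL)
    		ops->e_handler_func(ec);	/* CPU-specific handling */

    	/* If no handler exists, or the handler returns without fixing
    	 * things up and ereting itself, the exception is still reported
    	 * as unhandled. */
    	report_unhandled_exception();
    }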
+
+/*
+ * Handler for async EA from lower EL synchronized at EL3 entry in KFH mode.
+ *
+ * This scenario may arise when there is an error (EA) in the system which has
+ * not yet been signaled to the PE while executing at a lower EL. During entry
+ * into EL3, the error is synchronized implicitly or explicitly, causing an
+ * async EA to pend at EL3.
+ *
+ * On detecting the pending EA (via ISR_EL1.A), and given that the EA routing
+ * model is KFH (SCR_EL3.EA = 0), this handler reflects the error back to the
+ * lower EL.
+ *
+ * This function assumes x30 has been saved.
+ */
+func reflect_pending_async_ea_to_lower_el
+	/*
+	 * As the original exception was not handled, we need to ensure that we
+	 * return to the instruction which caused the exception. To achieve
+	 * that, eret to "elr - 4" (label "subtract_elr_el3") for an SMC, or
+	 * simply eret otherwise (label "skip_smc_check").
+	 *
+	 * LIMITATION: The async EA may be masked at the target exception
+	 * level, or its priority with respect to EL3/secure interrupts may be
+	 * lower, either of which causes execution to bounce back and forth
+	 * between the lower EL and EL3. To detect this cycle, we track the
+	 * loop count in "CTX_NESTED_EA_FLAG" and compare the current ELR with
+	 * the previous one saved in "CTX_SAVED_ELR_EL3", and panic once the
+	 * count is exceeded (label "check_loop_ctr"). While in this cycle the
+	 * loop counter retains its value, whereas a normal el3_exit clears it.
+	 * However, setting SCR_EL3.IESB = 1 should give priority to SError
+	 * handling, as per the AArch64.TakeException pseudocode in the Arm
+	 * ARM.
+	 *
+	 * TODO: In the future, if EL3 gains the capability to inject a virtual
+	 * SError into lower ELs, we can remove the el3_panic, handle the
+	 * original exception first, and inject an SError into the lower EL
+	 * before ereting back.
+	 */
+	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
+	mrs	x28, elr_el3
+	cmp	x29, x28
+	b.eq	check_loop_ctr
+	str	x28, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
+	/* Zero the loop counter */
+	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
+	b	skip_loop_ctr
+check_loop_ctr:
+	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
+	add	x29, x29, #1
+	str	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
+	cmp	x29, #ASYNC_EA_REPLAY_COUNTER
+	b.ge	el3_panic
+skip_loop_ctr:
+	/*
+	 * Logic to distinguish whether we came from an SMC or any other
+	 * exception. Use the offset within the vector table to determine which
+	 * exception we are handling. In each 0x200-sized group of vector
+	 * entries, offsets 0x0-0x80 hold the sync exception entry and
+	 * 0x80-0x200 the async entries. Use the vector base address (vbar_el3)
+	 * and the entry address (LR) to check whether the address we came from
+	 * lies in "0x0-0x80", "0x200-0x280", "0x400-0x480" or "0x600-0x680".
+	 */
+	mrs	x29, vbar_el3
+	sub	x30, x30, x29
+	and	x30, x30, #0x1ff
+	cmp	x30, #0x80
+	b.ge	skip_smc_check
+	/* It's a synchronous exception; now check whether it is an SMC. */
+	mrs	x30, esr_el3
+	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+	cmp	x30, #EC_AARCH32_SMC
+	b.eq	subtract_elr_el3
+	cmp	x30, #EC_AARCH64_SMC
+	b.eq	subtract_elr_el3
+	b	skip_smc_check
+subtract_elr_el3:
+	sub	x28, x28, #4
+skip_smc_check:
+	msr	elr_el3, x28
+	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	exception_return
+endfunc reflect_pending_async_ea_to_lower_el
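
Two pieces of the logic above may be easier to follow in C (illustrative
sketch only; detect_reflect_loop and came_from_sync_entry are hypothetical
names, and ASYNC_EA_REPLAY_COUNTER is defined by TF-A elsewhere):

    #include <stdint.h>

    #ifndef ASYNC_EA_REPLAY_COUNTER
    #define ASYNC_EA_REPLAY_COUNTER	100U	/* placeholder value */
    #endif

    /* Mirrors CTX_SAVED_ELR_EL3 / CTX_NESTED_EA_FLAG in the context. */
    struct ea_replay_state {
    	uint64_t saved_elr_el3;
    	uint64_t nested_ea_count;
    };

    /* Returns nonzero when EL3 keeps re-entering from the same ELR, i.e.
     * the lower EL never consumes the reflected SError and we should
     * panic instead of looping forever. */
    static int detect_reflect_loop(struct ea_replay_state *s,
    			           uint64_t elr_el3)
    {
    	if (s->saved_elr_el3 != elr_el3) {
    		s->saved_elr_el3 = elr_el3;	/* new trap site: reset */
    		s->nested_ea_count = 0U;
    		return 0;
    	}
    	return (++s->nested_ea_count >= ASYNC_EA_REPLAY_COUNTER);
    }

    /* Each 0x200-byte vector group holds the sync entry in its first
     * 0x80 bytes, so an in-group offset below 0x80 means the sync entry
     * was taken (and ELR may need the -4 SMC adjustment). */
    static int came_from_sync_entry(uint64_t vbar_el3, uint64_t entry_lr)
    {
    	return ((entry_lr - vbar_el3) & 0x1ffU) < 0x80U;
    }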
+
 	/* ---------------------------------------------------------------------
 	 * The following code handles exceptions caused by BRK instructions.
 	 * Following a BRK instruction, the only real valid course of action is