AArch32: Rework SMC context save and restore mechanism

The current SMC context data structure `smc_ctx_t` and related helpers
are optimized for the case where an SMC call does not result in a world
switch. This was the case for the SP_MIN and BL1 cold boot flows. But
the firmware update use case requires a world switch as a result of an
SMC, and the current SMC context helpers do not support this well.
Therefore this patch makes the following changes to improve this:

1. Add monitor stack pointer, `sp_mon`, to `smc_ctx_t`

The C runtime stack pointer in monitor mode, `sp_mon`, is added to
the SMC context, and the `smc_ctx_t` pointer is cached in `sp_mon`
prior to exit from Monitor mode. This makes it easier to retrieve
the context when the next SMC call happens. As a result of this
change, the SMC context helpers no longer depend on the stack to
save and restore the registers.

This aligns AArch32 with the context save and restore mechanism in
AArch64.
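
For illustration, a minimal sketch of the reworked structure, assuming
the field layout of this patch (the banked USR/IRQ/FIQ/SVC/ABT/UND
registers between `r12` and `spsr_mon` are elided here):

    typedef struct smc_ctx {
            u_register_t r0;
            /* ... r1 - r12 and the banked mode registers elided ... */
            u_register_t spsr_mon;
            /*
             * Holds the C runtime stack pointer while in Monitor
             * mode; repurposed prior to exit to cache the
             * `smc_ctx_t` pointer so the context is easy to locate
             * on the next SMC entry.
             */
            u_register_t sp_mon;
            u_register_t lr_mon;
            u_register_t scr;       /* see point 2 below */
    } smc_ctx_t;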

2. Add SCR to `smc_ctx_t`

Adding the SCR register to `smc_ctx_t` makes it easier to manage this
register's state when switching between the non-secure and secure
worlds as a result of an SMC call.
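
As a rough C analogue of the new assembly in `handle_smc` (the helper
name below is illustrative, not part of this patch; `smc_ctx_t` and
SCR_NS_BIT are as defined in the sources):

    /* Derive the `flags` argument passed to the runtime service
     * handler from the SCR value saved in the caller's context. */
    static u_register_t smc_ctx_get_flags(const smc_ctx_t *ctx)
    {
            /* The NS bit records which world issued the SMC */
            return ctx->scr & SCR_NS_BIT;
    }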

Change-Id: I5e12a7056107c1701b457b8f7363fdbf892230bf
Signed-off-by: Soby Mathew <soby.mathew@arm.com>
Signed-off-by: dp-arm <dimitris.papastamos@arm.com>
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
index ebbee5a..e145511 100644
--- a/bl32/sp_min/aarch32/entrypoint.S
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -115,21 +115,10 @@
 	sub	r1, r1, r0
 	bl	clean_dcache_range
 
-	/* Program the registers in cpu_context and exit monitor mode */
-	mov	r0, #NON_SECURE
-	bl	cm_get_context
-
-	/* Restore the SCR */
-	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
-	stcopr	r2, SCR
-	isb
-
-	/* Restore the SCTLR  */
-	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
-	stcopr	r2, SCTLR
-
 	bl	smc_get_next_ctx
-	/* The other cpu_context registers have been copied to smc context */
+
+	/* r0 points to `smc_ctx_t` */
+	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
 	b	sp_min_exit
 endfunc sp_min_entrypoint
 
@@ -138,46 +127,44 @@
  * SMC handling function for SP_MIN.
  */
 func handle_smc
-	smcc_save_gp_mode_regs
+	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+	str	lr, [sp, #SMC_CTX_LR_MON]
 
-	/* r0 points to smc_context */
-	mov	r2, r0				/* handle */
-	ldcopr	r0, SCR
+	smcc_save_gp_mode_regs
 
 	/*
-	 * Save SCR in stack. r1 is pushed to meet the 8 byte
-	 * stack alignment requirement.
+	 * `sp` still points to `smc_ctx_t`. Save it to a register
+	 * and restore the C runtime stack pointer to `sp`.
 	 */
-	push	{r0, r1}
+	mov	r2, sp				/* handle */
+	ldr	sp, [r2, #SMC_CTX_SP_MON]
+
+	ldr	r0, [r2, #SMC_CTX_SCR]
 	and	r3, r0, #SCR_NS_BIT		/* flags */
 
 	/* Switch to Secure Mode */
 	bic	r0, #SCR_NS_BIT
 	stcopr	r0, SCR
 	isb
+
 	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
 	/* Check whether an SMC64 is issued */
 	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
-	beq	1f	/* SMC32 is detected */
+	beq	1f
+	/* SMC32 is not detected. Return an error to the caller */
 	mov	r0, #SMC_UNK
 	str	r0, [r2, #SMC_CTX_GPREG_R0]
 	mov	r0, r2
-	b	2f	/* Skip handling the SMC */
+	b	sp_min_exit
 1:
+	/* SMC32 is detected */
 	mov	r1, #0				/* cookie */
 	bl	handle_runtime_svc
-2:
-	/* r0 points to smc context */
 
-	/* Restore SCR from stack */
-	pop	{r1, r2}
-	stcopr	r1, SCR
-	isb
-
+	/* `r0` points to `smc_ctx_t` */
 	b	sp_min_exit
 endfunc handle_smc
 
-
 /*
  * The Warm boot entrypoint for SP_MIN.
  */
@@ -234,23 +221,9 @@
 #endif
 
 	bl	sp_min_warm_boot
-
-	/* Program the registers in cpu_context and exit monitor mode */
-	mov	r0, #NON_SECURE
-	bl	cm_get_context
-
-	/* Restore the SCR */
-	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
-	stcopr	r2, SCR
-	isb
-
-	/* Restore the SCTLR  */
-	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
-	stcopr	r2, SCTLR
-
 	bl	smc_get_next_ctx
-
-	/* The other cpu_context registers have been copied to smc context */
+	/* r0 points to `smc_ctx_t` */
+	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
 	b	sp_min_exit
 endfunc sp_min_warm_entrypoint
 
@@ -261,6 +234,5 @@
  * Arguments : r0 must point to the SMC context to restore from.
  */
 func sp_min_exit
-	smcc_restore_gp_mode_regs
-	eret
+	monitor_exit
 endfunc sp_min_exit
diff --git a/bl32/sp_min/sp_min_main.c b/bl32/sp_min/sp_min_main.c
index d47b82a..45ad03f 100644
--- a/bl32/sp_min/sp_min_main.c
+++ b/bl32/sp_min/sp_min_main.c
@@ -101,6 +101,7 @@
 	next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
 	next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
 	next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
+	next_smc_ctx->scr = read_ctx_reg(cpu_reg_ctx, CTX_SCR);
 }
 
 /*******************************************************************************
@@ -111,6 +112,8 @@
 static void sp_min_prepare_next_image_entry(void)
 {
 	entry_point_info_t *next_image_info;
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	u_register_t ns_sctlr;
 
 	/* Program system registers to proceed to non-secure */
 	next_image_info = sp_min_plat_get_bl33_ep_info();
@@ -125,6 +128,16 @@
 	/* Copy r0, lr and spsr from cpu context to SMC context */
 	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
 			smc_get_next_ctx());
+
+	/* Temporarily set the NS bit to access NS SCTLR */
+	write_scr(read_scr() | SCR_NS_BIT);
+	isb();
+	ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
+	write_sctlr(ns_sctlr);
+	isb();
+
+	write_scr(read_scr() & ~SCR_NS_BIT);
+	isb();
 }
 
 /******************************************************************************