Merge pull request #798 from douglas-raillard-arm/dr/fix_std_smc_after_suspend

Abort preempted TSP STD SMC after PSCI CPU suspend
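
This change makes the TSPD abort any preempted Standard SMC before it re-enters
the TSP for a PSCI CPU suspend, CPU off, system off or system reset request,
instead of overwriting the preempted S-EL1 context. A new TSP vector entry
(abort_std_smc_entry) resets the S-EL1 stack and gives the TSP a chance to
release resources held by the interrupted handler, and a new fast SMC function
ID (TSP_FID_ABORT) lets the non-secure world request the same abort explicitly.

The following is a minimal sketch of how a non-secure client might use
TSP_FID_ABORT. Only TSP_FID_ABORT and the SMC_UNK return for "nothing to
abort" come from the changes below; TSP_FID_RESUME, SMC_PREEMPTED and TSP_ADD
are pre-existing firmware definitions, and the smc_call() helper and its
argument layout are invented here purely for illustration.

    /*
     * Hypothetical non-secure usage sketch (not part of this patch).
     * smc_call() stands in for whatever SMC invocation helper the
     * non-secure client uses; only x0 of the return is examined.
     */
    uint64_t ret;

    /* Issue a Standard (interruptible) SMC to the TSP. */
    ret = smc_call(TSP_STD_FID(TSP_ADD), 4, 6, 0);

    if (ret == SMC_PREEMPTED) {
        /*
         * The call was preempted by a non-secure interrupt. Instead of
         * resuming it with TSP_FID_RESUME, ask the TSPD to abort it.
         */
        ret = smc_call(TSP_FID_ABORT, 0, 0, 0);
        if (ret == SMC_UNK) {
            /* No Standard SMC was pending on this CPU. */
        }
    }
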
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index 25385ca..4c296d4 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -180,6 +180,7 @@
 	b	tsp_sel1_intr_entry
 	b	tsp_system_off_entry
 	b	tsp_system_reset_entry
+	b	tsp_abort_std_smc_entry
 endfunc tsp_vector_table
 
 	/*---------------------------------------------
@@ -441,3 +442,30 @@
 	/* Should never reach here */
 	no_ret	plat_panic_handler
 endfunc tsp_std_smc_entry
+
+	/*---------------------------------------------------------------------
+	 * This entrypoint is used by the TSPD to abort a preempted Standard
+	 * SMC, either on behalf of the non-secure world or because a CPU
+	 * suspend/CPU off request needs to abort the preempted SMC.
+	 * --------------------------------------------------------------------
+	 */
+func tsp_abort_std_smc_entry
+
+	/*
+	 * Exception masking is already done by the TSPD when entering this
+	 * hook, so there is no need to do it here.
+	 */
+
+	/* Reset the stack used by the pre-empted SMC */
+	bl	plat_set_my_stack
+
+	/*
+	 * Allow some cleanup such as releasing locks.
+	 */
+	bl	tsp_abort_smc_handler
+
+	restore_args_call_smc
+
+	/* Should never reach here */
+	bl	plat_panic_handler
+endfunc tsp_abort_std_smc_entry
diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c
index d03f7e2..2b36532 100644
--- a/bl32/tsp/tsp_main.c
+++ b/bl32/tsp/tsp_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -416,3 +416,20 @@
 			    0, 0, 0, 0);
 }
 
+/*******************************************************************************
+ * TSP SMC abort handler. This function is called when aborting a preempted
+ * standard SMC request. It should clean up all resources owned by the SMC
+ * handler, such as locks or dynamically allocated memory, so that following
+ * SMC requests are executed in a clean environment.
+ ******************************************************************************/
+tsp_args_t *tsp_abort_smc_handler(uint64_t func,
+				  uint64_t arg1,
+				  uint64_t arg2,
+				  uint64_t arg3,
+				  uint64_t arg4,
+				  uint64_t arg5,
+				  uint64_t arg6,
+				  uint64_t arg7)
+{
+	return set_smc_args(TSP_ABORT_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
diff --git a/include/bl32/tsp/tsp.h b/include/bl32/tsp/tsp.h
index 4d7bc23..1e35788 100644
--- a/include/bl32/tsp/tsp.h
+++ b/include/bl32/tsp/tsp.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -41,6 +41,7 @@
 #define TSP_SUSPEND_DONE	0xf2000003
 #define TSP_RESUME_DONE		0xf2000004
 #define TSP_PREEMPTED		0xf2000005
+#define TSP_ABORT_DONE		0xf2000007
 #define TSP_SYSTEM_OFF_DONE	0xf2000008
 #define TSP_SYSTEM_RESET_DONE	0xf2000009
 
@@ -81,10 +82,17 @@
 /* SMC function ID to request a previously preempted std smc */
 #define TSP_FID_RESUME		TSP_STD_FID(0x3000)
 /*
+ * SMC function ID to request the abort of a previously preempted std smc.
+ * A fast SMC is used so that the TSP abort handler does not have to be
+ * reentrant.
+ */
+#define TSP_FID_ABORT		TSP_FAST_FID(0x3001)
+
+/*
  * Total number of function IDs implemented for services offered to NS clients.
  * The function IDs are defined above
  */
-#define TSP_NUM_FID		0x4
+#define TSP_NUM_FID		0x5
 
 /* TSP implementation version numbers */
 #define TSP_VERSION_MAJOR	0x0 /* Major version */
@@ -117,6 +125,7 @@
 	tsp_vector_isn_t sel1_intr_entry;
 	tsp_vector_isn_t system_off_entry;
 	tsp_vector_isn_t system_reset_entry;
+	tsp_vector_isn_t abort_std_smc_entry;
 } tsp_vectors_t;
 
 
diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c
index 322413c..3dcefea 100644
--- a/services/spd/tspd/tspd_common.c
+++ b/services/spd/tspd/tspd_common.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -32,7 +32,9 @@
 #include <assert.h>
 #include <bl_common.h>
 #include <context_mgmt.h>
+#include <debug.h>
 #include <string.h>
+#include <tsp.h>
 #include "tspd_private.h"
 
 /*******************************************************************************
@@ -129,3 +131,31 @@
 	/* Should never reach here */
 	assert(0);
 }
+
+/*******************************************************************************
+ * This function takes an SP context pointer and aborts any preempted SMC
+ * request.
+ * It returns 1 if there was a preempted SMC request, 0 otherwise.
+ ******************************************************************************/
+int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx)
+{
+	if (!get_std_smc_active_flag(tsp_ctx->state))
+		return 0;
+
+	/* Abort any preempted SMC request */
+	clr_std_smc_active_flag(tsp_ctx->state);
+
+	/*
+	 * Arrange for an entry into the test secure payload. Control will
+	 * return via the TSP_ABORT_DONE case in tspd_smc_handler.
+	 */
+	cm_set_elr_el3(SECURE,
+		       (uint64_t) &tsp_vectors->abort_std_smc_entry);
+	uint64_t rc = tspd_synchronous_sp_entry(tsp_ctx);
+
+	if (rc != 0)
+		panic();
+
+	return 1;
+}
+
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
index 1a06459..2850e70 100644
--- a/services/spd/tspd/tspd_main.c
+++ b/services/spd/tspd/tspd_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -459,6 +459,11 @@
 		 */
 		tspd_synchronous_sp_exit(tsp_ctx, x1);
 #endif
+	/*
+	 * This function ID is used only by the SP to indicate it has finished
+	 * aborting a preempted Standard SMC request.
+	 */
+	case TSP_ABORT_DONE:
 
 	/*
 	 * These function IDs are used only by the SP to indicate it has
@@ -595,6 +600,26 @@
 			SMC_RET3(ns_cpu_context, x1, x2, x3);
 		}
 
+		break;
+	/*
+	 * Request from the non-secure world to abort a preempted Standard SMC
+	 * call.
+	 */
+	case TSP_FID_ABORT:
+		/* ABORT should only be invoked by normal world */
+		if (!ns) {
+			assert(0);
+			break;
+		}
+
+		/* Abort the preempted SMC request */
+		if (!tspd_abort_preempted_smc(tsp_ctx))
+			/*
+			 * If there was no preempted SMC to abort, return
+			 * SMC_UNK.
+			 */
+			SMC_RET1(handle, SMC_UNK);
+
 		break;
 
 		/*
diff --git a/services/spd/tspd/tspd_pm.c b/services/spd/tspd/tspd_pm.c
index 55562ba..bc5435a 100644
--- a/services/spd/tspd/tspd_pm.c
+++ b/services/spd/tspd/tspd_pm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -58,6 +58,12 @@
 	assert(tsp_vectors);
 	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
 
+	/*
+	 * Abort any preempted SMC request before overwriting the SECURE
+	 * context.
+	 */
+	tspd_abort_preempted_smc(tsp_ctx);
+
 	/* Program the entry point and enter the TSP */
 	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_off_entry);
 	rc = tspd_synchronous_sp_entry(tsp_ctx);
@@ -75,7 +81,7 @@
 	 */
 	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
 
-	 return 0;
+	return 0;
 }
 
 /*******************************************************************************
@@ -91,6 +97,12 @@
 	assert(tsp_vectors);
 	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
 
+	/*
+	 * Abort any preempted SMC request before overwriting the SECURE
+	 * context.
+	 */
+	tspd_abort_preempted_smc(tsp_ctx);
+
 	/* Program the entry point and enter the TSP */
 	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_suspend_entry);
 	rc = tspd_synchronous_sp_entry(tsp_ctx);
@@ -99,7 +111,7 @@
 	 * Read the response from the TSP. A non-zero return means that
 	 * something went wrong while communicating with the TSP.
 	 */
-	if (rc != 0)
+	if (rc)
 		panic();
 
 	/* Update its context to reflect the state the TSP is in */
@@ -108,7 +120,7 @@
 
 /*******************************************************************************
  * This cpu has been turned on. Enter the TSP to initialise S-EL1 and other bits
- * before passing control back to the Secure Monitor. Entry in S-El1 is done
+ * before passing control back to the Secure Monitor. Entry in S-EL1 is done
  * after initialising minimal architectural state that guarantees safe
  * execution.
  ******************************************************************************/
@@ -205,6 +217,12 @@
 	assert(tsp_vectors);
 	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
 
+	/*
+	 * Abort any preempted SMC request before overwriting the SECURE
+	 * context.
+	 */
+	tspd_abort_preempted_smc(tsp_ctx);
+
 	/* Program the entry point */
 	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->system_off_entry);
 
@@ -225,11 +243,19 @@
 	assert(tsp_vectors);
 	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
 
+	/*
+	 * Abort any preempted SMC request before overwriting the SECURE
+	 * context.
+	 */
+	tspd_abort_preempted_smc(tsp_ctx);
+
 	/* Program the entry point */
 	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->system_reset_entry);
 
-	/* Enter the TSP. We do not care about the return value because we
-	 * must continue the reset anyway */
+	/*
+	 * Enter the TSP. We do not care about the return value because we
+	 * must continue the reset anyway
+	 */
 	tspd_synchronous_sp_entry(tsp_ctx);
 }
 
diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h
index cadc6aa..82039a4 100644
--- a/services/spd/tspd/tspd_private.h
+++ b/services/spd/tspd/tspd_private.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -242,6 +242,7 @@
 				uint32_t rw,
 				uint64_t pc,
 				tsp_context_t *tsp_ctx);
+int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx);
 
 extern tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];
 extern struct tsp_vectors *tsp_vectors;
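
Note: the tsp_abort_smc_handler() added in bl32/tsp/tsp_main.c above is a stub
that only acknowledges the abort. A secure payload that holds locks or
dynamically allocated memory across a preemptible SMC (as the comment above it
notes) would release them in this hook before returning TSP_ABORT_DONE. A
minimal sketch, assuming a hypothetical spinlock tsp_work_lock and flag
tsp_work_in_progress taken by the preempted Standard SMC handler (neither
exists in the TSP, and the same includes as tsp_main.c plus the firmware's
spinlock header are assumed):

/*
 * Hypothetical abort handler sketch (not part of this patch). It assumes
 * the payload's Standard SMC handler takes 'tsp_work_lock' and sets
 * 'tsp_work_in_progress' before it can be preempted; both names are
 * invented for illustration.
 */
tsp_args_t *tsp_abort_smc_handler(uint64_t func, uint64_t arg1,
				  uint64_t arg2, uint64_t arg3,
				  uint64_t arg4, uint64_t arg5,
				  uint64_t arg6, uint64_t arg7)
{
	if (tsp_work_in_progress) {
		/* Discard the partially completed request. */
		tsp_work_in_progress = 0;

		/* Release the lock held by the preempted handler. */
		spin_unlock(&tsp_work_lock);
	}

	/* Tell the TSPD the abort is complete. */
	return set_smc_args(TSP_ABORT_DONE, 0, 0, 0, 0, 0, 0, 0);
}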