Merge pull request #760 from Xilinx/zynqmp-2016-11
ZynqMP Updates
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index f333bf1..220d1cc 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -38,10 +38,10 @@
.globl runtime_exceptions
- /* -----------------------------------------------------
- * Handle SMC exceptions separately from other sync.
- * exceptions.
- * -----------------------------------------------------
+ /* ---------------------------------------------------------------------
+ * This macro handles Synchronous exceptions.
+ * Only SMC exceptions are supported.
+ * ---------------------------------------------------------------------
*/
.macro handle_sync_exception
/* Enable the SError interrupt */
@@ -50,11 +50,10 @@
str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
#if ENABLE_RUNTIME_INSTRUMENTATION
-
/*
- * Read the timestamp value and store it in per-cpu data.
- * The value will be extracted from per-cpu data by the
- * C level SMC handler and saved to the PMF timestamp region.
+ * Read the timestamp value and store it in per-cpu data. The value
+ * will be extracted from per-cpu data by the C level SMC handler and
+ * saved to the PMF timestamp region.
*/
mrs x30, cntpct_el0
str x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
@@ -66,26 +65,22 @@
mrs x30, esr_el3
ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+ /* Handle SMC exceptions separately from other synchronous exceptions */
cmp x30, #EC_AARCH32_SMC
b.eq smc_handler32
cmp x30, #EC_AARCH64_SMC
b.eq smc_handler64
- /* -----------------------------------------------------
- * The following code handles any synchronous exception
- * that is not an SMC.
- * -----------------------------------------------------
- */
-
+ /* Other kinds of synchronous exceptions are not handled */
bl report_unhandled_exception
.endm
- /* -----------------------------------------------------
- * This macro handles FIQ or IRQ interrupts i.e. EL3,
- * S-EL1 and NS interrupts.
- * -----------------------------------------------------
+ /* ---------------------------------------------------------------------
+ * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
+ * interrupts.
+ * ---------------------------------------------------------------------
*/
.macro handle_interrupt_exception label
/* Enable the SError interrupt */
@@ -94,10 +89,7 @@
str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
bl save_gp_registers
- /*
- * Save the EL3 system registers needed to return from
- * this exception.
- */
+ /* Save the EL3 system registers needed to return from this exception */
mrs x0, spsr_el3
mrs x1, elr_el3
stp x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
@@ -109,36 +101,34 @@
mov sp, x2
/*
- * Find out whether this is a valid interrupt type. If the
- * interrupt controller reports a spurious interrupt then
- * return to where we came from.
+ * Find out whether this is a valid interrupt type.
+ * If the interrupt controller reports a spurious interrupt then return
+ * to where we came from.
*/
bl plat_ic_get_pending_interrupt_type
cmp x0, #INTR_TYPE_INVAL
b.eq interrupt_exit_\label
/*
- * Get the registered handler for this interrupt type. A
- * NULL return value could be 'cause of the following
- * conditions:
+ * Get the registered handler for this interrupt type.
+	 * A NULL return value could be due to one of the following conditions:
*
- * a. An interrupt of a type was routed correctly but a
- * handler for its type was not registered.
+ * a. An interrupt of a type was routed correctly but a handler for its
+ * type was not registered.
*
- * b. An interrupt of a type was not routed correctly so
- * a handler for its type was not registered.
+ * b. An interrupt of a type was not routed correctly so a handler for
+ * its type was not registered.
*
- * c. An interrupt of a type was routed correctly to EL3,
- * but was deasserted before its pending state could
- * be read. Another interrupt of a different type pended
- * at the same time and its type was reported as pending
- * instead. However, a handler for this type was not
- * registered.
+ * c. An interrupt of a type was routed correctly to EL3, but was
+ * deasserted before its pending state could be read. Another
+ * interrupt of a different type pended at the same time and its
+ * type was reported as pending instead. However, a handler for this
+ * type was not registered.
*
- * a. and b. can only happen due to a programming error.
- * The occurrence of c. could be beyond the control of
- * Trusted Firmware. It makes sense to return from this
- * exception instead of reporting an error.
+ * a. and b. can only happen due to a programming error. The
+ * occurrence of c. could be beyond the control of Trusted Firmware.
+ * It makes sense to return from this exception instead of reporting an
+ * error.
*/
bl get_interrupt_type_handler
cbz x0, interrupt_exit_\label
@@ -153,7 +143,7 @@
/* Restore the reference to the 'handle' i.e. SP_EL3 */
mov x2, x20
- /* x3 will point to a cookie (not used now) */
+ /* x3 will point to a cookie (not used now) */
mov x3, xzr
/* Call the interrupt type handler */
@@ -180,24 +170,20 @@
vector_base runtime_exceptions
- /* -----------------------------------------------------
- * Current EL with _sp_el0 : 0x0 - 0x200
- * -----------------------------------------------------
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
*/
vector_entry sync_exception_sp_el0
- /* -----------------------------------------------------
- * We don't expect any synchronous exceptions from EL3
- * -----------------------------------------------------
- */
+ /* We don't expect any synchronous exceptions from EL3 */
bl report_unhandled_exception
check_vector_size sync_exception_sp_el0
- /* -----------------------------------------------------
- * EL3 code is non-reentrant. Any asynchronous exception
- * is a serious error. Loop infinitely.
- * -----------------------------------------------------
- */
vector_entry irq_sp_el0
+ /*
+ * EL3 code is non-reentrant. Any asynchronous exception is a serious
+ * error. Loop infinitely.
+ */
bl report_unhandled_interrupt
check_vector_size irq_sp_el0
@@ -211,18 +197,16 @@
bl report_unhandled_exception
check_vector_size serror_sp_el0
- /* -----------------------------------------------------
- * Current EL with SPx: 0x200 - 0x400
- * -----------------------------------------------------
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
*/
-
vector_entry sync_exception_sp_elx
- /* -----------------------------------------------------
- * This exception will trigger if anything went wrong
- * during a previous exception entry or exit or while
- * handling an earlier unexpected synchronous exception.
- * There is a high probability that SP_EL3 is corrupted.
- * -----------------------------------------------------
+ /*
+ * This exception will trigger if anything went wrong during a previous
+ * exception entry or exit or while handling an earlier unexpected
+ * synchronous exception. There is a high probability that SP_EL3 is
+ * corrupted.
*/
bl report_unhandled_exception
check_vector_size sync_exception_sp_elx
@@ -239,27 +223,20 @@
bl report_unhandled_exception
check_vector_size serror_sp_elx
- /* -----------------------------------------------------
+ /* ---------------------------------------------------------------------
* Lower EL using AArch64 : 0x400 - 0x600
- * -----------------------------------------------------
+ * ---------------------------------------------------------------------
*/
vector_entry sync_exception_aarch64
- /* -----------------------------------------------------
- * This exception vector will be the entry point for
- * SMCs and traps that are unhandled at lower ELs most
- * commonly. SP_EL3 should point to a valid cpu context
- * where the general purpose and system register state
- * can be saved.
- * -----------------------------------------------------
+ /*
+	 * This exception vector will most commonly be the entry point for
+	 * SMCs and traps that are unhandled at lower ELs. SP_EL3 should point
+ * to a valid cpu context where the general purpose and system register
+ * state can be saved.
*/
handle_sync_exception
check_vector_size sync_exception_aarch64
- /* -----------------------------------------------------
- * Asynchronous exceptions from lower ELs are not
- * currently supported. Report their occurrence.
- * -----------------------------------------------------
- */
vector_entry irq_aarch64
handle_interrupt_exception irq_aarch64
check_vector_size irq_aarch64
@@ -269,30 +246,27 @@
check_vector_size fiq_aarch64
vector_entry serror_aarch64
+ /*
+ * SError exceptions from lower ELs are not currently supported.
+ * Report their occurrence.
+ */
bl report_unhandled_exception
check_vector_size serror_aarch64
- /* -----------------------------------------------------
+ /* ---------------------------------------------------------------------
* Lower EL using AArch32 : 0x600 - 0x800
- * -----------------------------------------------------
+ * ---------------------------------------------------------------------
*/
vector_entry sync_exception_aarch32
- /* -----------------------------------------------------
- * This exception vector will be the entry point for
- * SMCs and traps that are unhandled at lower ELs most
- * commonly. SP_EL3 should point to a valid cpu context
- * where the general purpose and system register state
- * can be saved.
- * -----------------------------------------------------
+ /*
+	 * This exception vector will most commonly be the entry point for
+	 * SMCs and traps that are unhandled at lower ELs. SP_EL3 should point
+ * to a valid cpu context where the general purpose and system register
+ * state can be saved.
*/
handle_sync_exception
check_vector_size sync_exception_aarch32
- /* -----------------------------------------------------
- * Asynchronous exceptions from lower ELs are not
- * currently supported. Report their occurrence.
- * -----------------------------------------------------
- */
vector_entry irq_aarch32
handle_interrupt_exception irq_aarch32
check_vector_size irq_aarch32
@@ -302,34 +276,34 @@
check_vector_size fiq_aarch32
vector_entry serror_aarch32
+ /*
+ * SError exceptions from lower ELs are not currently supported.
+ * Report their occurrence.
+ */
bl report_unhandled_exception
check_vector_size serror_aarch32
- /* -----------------------------------------------------
+ /* ---------------------------------------------------------------------
* The following code handles secure monitor calls.
- * Depending upon the execution state from where the SMC
- * has been invoked, it frees some general purpose
- * registers to perform the remaining tasks. They
- * involve finding the runtime service handler that is
- * the target of the SMC & switching to runtime stacks
- * (SP_EL0) before calling the handler.
+ * Depending upon the execution state from where the SMC has been
+ * invoked, it frees some general purpose registers to perform the
+ * remaining tasks. They involve finding the runtime service handler
+ * that is the target of the SMC & switching to runtime stacks (SP_EL0)
+ * before calling the handler.
*
- * Note that x30 has been explicitly saved and can be
- * used here
- * -----------------------------------------------------
+ * Note that x30 has been explicitly saved and can be used here
+ * ---------------------------------------------------------------------
*/
func smc_handler
smc_handler32:
/* Check whether aarch32 issued an SMC64 */
tbnz x0, #FUNCID_CC_SHIFT, smc_prohibited
- /* -----------------------------------------------------
- * Since we're are coming from aarch32, x8-x18 need to
- * be saved as per SMC32 calling convention. If a lower
- * EL in aarch64 is making an SMC32 call then it must
- * have saved x8-x17 already therein.
- * -----------------------------------------------------
+ /*
+	 * Since we are coming from aarch32, x8-x18 need to be saved as per
+ * SMC32 calling convention. If a lower EL in aarch64 is making an
+ * SMC32 call then it must have saved x8-x17 already therein.
*/
stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
@@ -340,15 +314,14 @@
/* x4-x7, x18, sp_el0 are saved below */
smc_handler64:
- /* -----------------------------------------------------
- * Populate the parameters for the SMC handler. We
- * already have x0-x4 in place. x5 will point to a
- * cookie (not used now). x6 will point to the context
- * structure (SP_EL3) and x7 will contain flags we need
- * to pass to the handler Hence save x5-x7. Note that x4
- * only needs to be preserved for AArch32 callers but we
- * do it for AArch64 callers as well for convenience
- * -----------------------------------------------------
+ /*
+ * Populate the parameters for the SMC handler.
+ * We already have x0-x4 in place. x5 will point to a cookie (not used
+ * now). x6 will point to the context structure (SP_EL3) and x7 will
+	 * contain flags we need to pass to the handler. Hence save x5-x7.
+ *
+ * Note: x4 only needs to be preserved for AArch32 callers but we do it
+ * for AArch64 callers as well for convenience
*/
stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
@@ -370,12 +343,10 @@
adr x14, rt_svc_descs_indices
ldrb w15, [x14, x16]
- /* -----------------------------------------------------
- * Restore the saved C runtime stack value which will
- * become the new SP_EL0 i.e. EL3 runtime stack. It was
- * saved in the 'cpu_context' structure prior to the last
- * ERET from EL3.
- * -----------------------------------------------------
+ /*
+ * Restore the saved C runtime stack value which will become the new
+ * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
+ * structure prior to the last ERET from EL3.
*/
ldr x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
@@ -388,22 +359,19 @@
/* Switch to SP_EL0 */
msr spsel, #0
- /* -----------------------------------------------------
+ /*
* Get the descriptor using the index
* x11 = (base + off), x15 = index
*
* handler = (base + off) + (index << log2(size))
- * -----------------------------------------------------
*/
lsl w10, w15, #RT_SVC_SIZE_LOG2
ldr x15, [x11, w10, uxtw]
- /* -----------------------------------------------------
- * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there
- * is a world switch during SMC handling.
- * TODO: Revisit if all system registers can be saved
- * later.
- * -----------------------------------------------------
+ /*
+ * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
+ * switch during SMC handling.
+ * TODO: Revisit if all system registers can be saved later.
*/
mrs x16, spsr_el3
mrs x17, elr_el3
@@ -416,12 +384,10 @@
mov sp, x12
- /* -----------------------------------------------------
- * Call the Secure Monitor Call handler and then drop
- * directly into el3_exit() which will program any
- * remaining architectural state prior to issuing the
- * ERET to the desired lower EL.
- * -----------------------------------------------------
+ /*
+ * Call the Secure Monitor Call handler and then drop directly into
+ * el3_exit() which will program any remaining architectural state
+ * prior to issuing the ERET to the desired lower EL.
*/
#if DEBUG
cbz x15, rt_svc_fw_critical_error
@@ -436,7 +402,7 @@
* callers will find the registers contents unchanged, but AArch64
* callers will find the registers modified (with stale earlier NS
* content). Either way, we aren't leaking any secure information
- * through them
+ * through them.
*/
mov w0, #SMC_UNK
b restore_gp_registers_callee_eret
@@ -447,6 +413,7 @@
eret
rt_svc_fw_critical_error:
- msr spsel, #1 /* Switch to SP_ELx */
+ /* Switch to SP_ELx */
+ msr spsel, #1
bl report_unhandled_exception
endfunc smc_handler
diff --git a/docs/firmware-design.md b/docs/firmware-design.md
index abe7dc5..c37f9c5 100644
--- a/docs/firmware-design.md
+++ b/docs/firmware-design.md
@@ -1077,7 +1077,7 @@
Details for implementing a CPU specific reset handler can be found in
Section 8. Details for implementing a platform specific reset handler can be
-found in the [Porting Guide](see the `plat_reset_handler()` function).
+found in the [Porting Guide] (see the `plat_reset_handler()` function).
When adding functionality to a reset handler, keep in mind that if a different
reset handling behavior is required between the first and the subsequent
diff --git a/docs/interrupt-framework-design.md b/docs/interrupt-framework-design.md
index e50d175..b468949 100644
--- a/docs/interrupt-framework-design.md
+++ b/docs/interrupt-framework-design.md
@@ -335,9 +335,9 @@
This component declares the following prototype for a handler of an interrupt type.
typedef uint64_t (*interrupt_type_handler_t)(uint32_t id,
- uint32_t flags,
- void *handle,
- void *cookie);
+ uint32_t flags,
+ void *handle,
+ void *cookie);
The `id` parameter is reserved and could be used in the future for passing
the interrupt id of the highest pending interrupt only if there is a foolproof
@@ -358,10 +358,16 @@
for the security state specified in the `flags` parameter.
Once the handler routine completes, execution will return to either the secure
-or non-secure state. The handler routine should return a pointer to
-`cpu_context` structure of the current CPU for the target security state. It
-should treat all error conditions as critical errors and take appropriate action
-within its implementation e.g. use assertion failures.
+or non-secure state. The handler routine must return a pointer to the
+`cpu_context` structure of the current CPU for the target security state. On
+AArch64, this return value is currently ignored by the caller as the
+appropriate `cpu_context` to be used is expected to be set by the handler
+via the context management library APIs.
+A portable interrupt handler implementation must set the target context both in
+the structure pointed to by the returned pointer and via the context management
+library APIs. The handler should treat all error conditions as critical errors
+and take appropriate action within its implementation e.g. use assertion
+failures.
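
A minimal sketch of a handler honoring this contract is shown below. It is
illustrative only: the dispatcher-specific work is elided, and a handler that
switches to the secure world is assumed.

    static uint64_t spd_interrupt_handler(uint32_t id, uint32_t flags,
                                          void *handle, void *cookie)
    {
        /* The handle is the context of the interrupted security state */
        assert(handle == cm_get_context(NON_SECURE));

        /* Save the preempted non-secure EL1 system register state */
        cm_el1_sysregs_context_save(NON_SECURE);

        /*
         * Program the target (secure) context via the context management
         * library APIs, as required for portability ...
         */
        cm_el1_sysregs_context_restore(SECURE);
        cm_set_next_eret_context(SECURE);

        /* ... and also return a pointer to that same target context */
        return (uint64_t)cm_get_context(SECURE);
    }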
The runtime firmware provides the following API for registering a handler for a
particular type of interrupt. A Secure Payload Dispatcher service should use
@@ -370,8 +376,8 @@
the type of interrupt.
int32_t register_interrupt_type_handler(uint32_t type,
- interrupt_type_handler handler,
- uint64_t flags);
+                                            interrupt_type_handler_t handler,
+ uint64_t flags);
The `type` parameter can be one of the three interrupt types listed above i.e.
@@ -962,13 +968,13 @@
secure software sequence for issuing a `standard` SMC would look like this,
assuming `P.STATE.I=0` in the non-secure state:
- int rc;
- rc = smc(TSP_STD_SMC_FID, ...); /* Issue a Standard SMC call */
- /* The pending non-secure interrupt is handled by the interrupt handler
- and returns back here. */
- while (rc == SMC_PREEMPTED) { /* Check if the SMC call is preempted */
- rc = smc(TSP_FID_RESUME); /* Issue resume SMC call */
- }
+ int rc;
+ rc = smc(TSP_STD_SMC_FID, ...); /* Issue a Standard SMC call */
+ /* The pending non-secure interrupt is handled by the interrupt handler
+ and returns back here. */
+ while (rc == SMC_PREEMPTED) { /* Check if the SMC call is preempted */
+ rc = smc(TSP_FID_RESUME); /* Issue resume SMC call */
+ }
The `TSP_STD_SMC_FID` is any `standard` SMC function identifier and the smc()
function invokes an SMC call with the required arguments. The pending non-secure
diff --git a/docs/porting-guide.md b/docs/porting-guide.md
index 7534e39..74a0a85 100644
--- a/docs/porting-guide.md
+++ b/docs/porting-guide.md
@@ -1834,6 +1834,18 @@
the `pwr_domain_on_finish()` operation. The generic code expects the platform
to succeed.
+#### plat_psci_ops.system_off()
+
+This function is called by the PSCI implementation in response to a `SYSTEM_OFF`
+call. It performs the platform-specific system poweroff sequence after
+notifying the Secure Payload Dispatcher.
+
+#### plat_psci_ops.system_reset()
+
+This function is called by the PSCI implementation in response to a `SYSTEM_RESET`
+call. It performs the platform-specific system reset sequence after
+notifying the Secure Payload Dispatcher.
+
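
A hedged sketch of how a platform might populate these hooks follows; the
`PLAT_PWRC_*` register offsets are hypothetical placeholders, while
`mmio_write_32()`, `wfi()`, `ERROR()` and `panic()` are existing Trusted
Firmware helpers:

    static void __dead2 plat_system_off(void)
    {
        /* Ask the (hypothetical) power controller to remove power */
        mmio_write_32(PLAT_PWRC_BASE + PLAT_PWRC_SYS_OFF, 1);
        wfi();
        ERROR("System Off: operation not handled.\n");
        panic();
    }

    static void __dead2 plat_system_reset(void)
    {
        /* Ask the (hypothetical) power controller for a cold reset */
        mmio_write_32(PLAT_PWRC_BASE + PLAT_PWRC_SYS_RST, 1);
        wfi();
        ERROR("System Reset: operation not handled.\n");
        panic();
    }

    static const plat_psci_ops_t plat_psci_pm_ops = {
        /* ... other hooks ... */
        .system_off = plat_system_off,
        .system_reset = plat_system_reset,
    };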
#### plat_psci_ops.validate_power_state()
This function is called by the PSCI implementation during the `CPU_SUSPEND`
diff --git a/docs/spd/trusty-dispatcher.md b/docs/spd/trusty-dispatcher.md
new file mode 100644
index 0000000..0258959
--- /dev/null
+++ b/docs/spd/trusty-dispatcher.md
@@ -0,0 +1,15 @@
+Trusty Dispatcher
+=================
+Trusty is a set of software components, supporting a Trusted Execution
+Environment (TEE) on mobile devices, published and maintained by Google.
+
+Detailed information and build instructions can be found on the Android
+Open Source Project (AOSP) webpage for Trusty hosted at
+https://source.android.com/security/trusty
+
+Supported platforms
+===================
+Out of all the platforms supported by the ARM Trusted Firmware, Trusty is
+verified and supported only on NVIDIA's Tegra SoCs.
diff --git a/include/common/aarch32/el3_common_macros.S b/include/common/aarch32/el3_common_macros.S
index 50ce952..0018ea4 100644
--- a/include/common/aarch32/el3_common_macros.S
+++ b/include/common/aarch32/el3_common_macros.S
@@ -67,6 +67,14 @@
orr r0, r0, #SCR_SIF_BIT
stcopr r0, SCR
+ /* -----------------------------------------------------------------
+ * Reset those registers that may have architecturally unknown reset
+ * values
+ * -----------------------------------------------------------------
+ */
+ mov r0, #0
+ stcopr r0, SDCR
+
/* -----------------------------------------------------
* Enable the Asynchronous data abort now that the
* exception vectors have been setup.
diff --git a/include/common/aarch64/el3_common_macros.S b/include/common/aarch64/el3_common_macros.S
index 9b22a73..a418911 100644
--- a/include/common/aarch64/el3_common_macros.S
+++ b/include/common/aarch64/el3_common_macros.S
@@ -77,6 +77,13 @@
*/
mov x0, #(SCR_RES1_BITS | SCR_EA_BIT | SCR_SIF_BIT)
msr scr_el3, x0
+
+ /* ---------------------------------------------------------------------
+ * Reset registers that may have architecturally unknown reset values
+ * ---------------------------------------------------------------------
+ */
+ msr mdcr_el3, xzr
+
/* ---------------------------------------------------------------------
* Enable External Aborts and SError Interrupts now that the exception
* vectors have been setup.
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
index 4968e24..3c5ab26 100644
--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -318,6 +318,11 @@
#define MAX_CACHE_LINE_SIZE 0x800 /* 2KB */
+/* PMCR definitions */
+#define PMCR_N_SHIFT 11
+#define PMCR_N_MASK 0x1f
+#define PMCR_N_BITS (PMCR_N_MASK << PMCR_N_SHIFT)
+
/*******************************************************************************
* Definitions of register offsets and fields in the CNTCTLBase Frame of the
* system level implementation of the Generic Timer.
@@ -375,6 +380,11 @@
#define CSSELR p15, 2, c0, c0, 0
#define CCSIDR p15, 1, c0, c0, 0
+/* Debug register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
+#define HDCR p15, 4, c1, c1, 1
+#define SDCR p15, 0, c1, c3, 1
+#define PMCR p15, 0, c9, c12, 0
+
/* GICv3 CPU Interface system register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
#define ICC_IAR1 p15, 0, c12, c12, 0
#define ICC_IAR0 p15, 0, c12, c8, 0
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
index 3b4349c..0633bca 100644
--- a/include/lib/aarch32/arch_helpers.h
+++ b/include/lib/aarch32/arch_helpers.h
@@ -249,6 +249,9 @@
DEFINE_COPROCR_RW_FUNCS(icc_eoir0_el1, ICC_EOIR0)
DEFINE_COPROCR_RW_FUNCS(icc_eoir1_el1, ICC_EOIR1)
+DEFINE_COPROCR_RW_FUNCS(hdcr, HDCR)
+DEFINE_COPROCR_READ_FUNC(pmcr, PMCR)
+
/*
* TLBI operation prototypes
*/
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index bef6032..a034ae2 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -411,4 +411,9 @@
#define CNTACR_RWVT_SHIFT 0x4
#define CNTACR_RWPT_SHIFT 0x5
+/* PMCR_EL0 definitions */
+#define PMCR_EL0_N_SHIFT 11
+#define PMCR_EL0_N_MASK 0x1f
+#define PMCR_EL0_N_BITS (PMCR_EL0_N_MASK << PMCR_EL0_N_SHIFT)
+
#endif /* __ARCH_H__ */
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 4d936ad..37db031 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -279,6 +279,9 @@
DEFINE_SYSREG_READ_FUNC(ctr_el0)
+DEFINE_SYSREG_RW_FUNCS(mdcr_el2)
+DEFINE_SYSREG_READ_FUNC(pmcr_el0)
+
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el2, ICC_SRE_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el3, ICC_SRE_EL3)
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 02ae2a7..29532e8 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -200,7 +200,10 @@
isb();
} else if (read_id_pfr1() &
(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
- /* Set the NS bit to access HCR, HCPTR, CNTHCTL, VPIDR, VMPIDR */
+ /*
+ * Set the NS bit to access NS copies of certain banked
+ * registers
+ */
write_scr(read_scr() | SCR_NS_BIT);
isb();
@@ -231,6 +234,15 @@
* translation are disabled.
*/
write64_vttbr(0);
+
+ /*
+ * Avoid unexpected debug traps in case where HDCR
+ * is not completely reset by the hardware - set
+ * HDCR.HPMN to PMCR.N and zero the remaining bits.
+ * The HDCR.HPMN and PMCR.N fields are the same size
+ * (5 bits) and HPMN is at offset zero within HDCR.
+ */
+ write_hdcr((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT);
isb();
write_scr(read_scr() & ~SCR_NS_BIT);
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 4b5d0ee..fadc1db 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -259,6 +259,16 @@
* translation are disabled.
*/
write_vttbr_el2(0);
+ /*
+ * Avoid unexpected debug traps in case where MDCR_EL2
+ * is not completely reset by the hardware - set
+ * MDCR_EL2.HPMN to PMCR_EL0.N and zero the remaining
+ * bits.
+ * MDCR_EL2.HPMN and PMCR_EL0.N fields are the same size
+ * (5 bits) and HPMN is at offset zero within MDCR_EL2.
+ */
+ write_mdcr_el2((read_pmcr_el0() & PMCR_EL0_N_BITS)
+ >> PMCR_EL0_N_SHIFT);
}
}
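
As a worked example of the field arithmetic above (the counter count is
hypothetical): on a core whose PMU implements six event counters,
`PMCR_EL0.N` reads as 6, and the write reduces to:

    /* read_pmcr_el0() & PMCR_EL0_N_BITS  ->  6 << 11  ==  0x3000 */
    /* 0x3000 >> PMCR_EL0_N_SHIFT         ->  6                   */
    /* MDCR_EL2 = 6: HPMN = 6, all trap and enable bits cleared   */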
diff --git a/readme.md b/readme.md
index d9a1714..ef5f6ee 100644
--- a/readme.md
+++ b/readme.md
@@ -105,7 +105,7 @@
of the [Juno ARM Development Platform] [Juno] with [Linaro Release 16.06].
The AArch64 build of this release has been tested on the following ARM
-[FVP]s (64-bit host machine only):
+[FVP]s (64-bit host machine only, with [Linaro Release 16.06]):
* `Foundation_Platform` (Version 10.1, Build 10.1.32)
* `FVP_Base_AEMv8A-AEMv8A` (Version 7.7, Build 0.8.7701)
@@ -114,7 +114,7 @@
* `FVP_Base_Cortex-A57x2-A53x4` (Version 7.7, Build 0.8.7701)
The AArch32 build of this release has been tested on the following ARM
-[FVP]s (64-bit host machine only):
+[FVP]s (64-bit host machine only, with [Linaro Release 16.06]):
* `FVP_Base_AEMv8A-AEMv8A` (Version 7.7, Build 0.8.7701)
* `FVP_Base_Cortex-A32x4` (Version 10.1, Build 10.1.32)
diff --git a/services/spd/trusty/sm_err.h b/services/spd/trusty/sm_err.h
new file mode 100644
index 0000000..d7cddec
--- /dev/null
+++ b/services/spd/trusty/sm_err.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __LIB_SM_SM_ERR_H
+#define __LIB_SM_SM_ERR_H
+
+/* Errors from the secure monitor */
+#define SM_ERR_UNDEFINED_SMC		0xFFFFFFFF /* Unknown SMC (defined by ARM DEN 0028A(0.9.0)) */
+#define SM_ERR_INVALID_PARAMETERS -2
+#define SM_ERR_INTERRUPTED -3 /* Got interrupted. Call back with restart SMC */
+#define SM_ERR_UNEXPECTED_RESTART	-4	/* Got a restart SMC when we didn't expect it */
+#define SM_ERR_BUSY -5 /* Temporarily busy. Call back with original args */
+#define SM_ERR_INTERLEAVED_SMC -6 /* Got a trusted_service SMC when a restart SMC is required */
+#define SM_ERR_INTERNAL_FAILURE -7 /* Unknown error */
+#define SM_ERR_NOT_SUPPORTED -8
+#define SM_ERR_NOT_ALLOWED -9 /* SMC call not allowed */
+#define SM_ERR_END_OF_INPUT -10
+
+#endif /* __LIB_SM_SM_ERR_H */
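
The comments above imply a restart protocol for preempted standard calls. A
sketch of the resulting non-secure loop, inferred from these error codes
(`smc()` is a hypothetical wrapper, as in the interrupt framework document;
`SMC_SC_NOP` and `SMC_SC_RESTART_LAST` are defined in smcall.h below):

    uint64_t rc = smc(SMC_SC_NOP, 0, 0, 0);         /* issue a standard call */
    for (;;) {
        if (rc == (uint64_t)SM_ERR_INTERRUPTED)
            rc = smc(SMC_SC_RESTART_LAST, 0, 0, 0); /* resume where we left off */
        else if (rc == (uint64_t)SM_ERR_BUSY)
            rc = smc(SMC_SC_NOP, 0, 0, 0);          /* retry with original args */
        else
            break;
    }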
diff --git a/services/spd/trusty/smcall.h b/services/spd/trusty/smcall.h
new file mode 100644
index 0000000..7e876c8
--- /dev/null
+++ b/services/spd/trusty/smcall.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __LIB_SM_SMCALL_H
+#define __LIB_SM_SMCALL_H
+
+#define SMC_NUM_ENTITIES 64
+#define SMC_NUM_ARGS 4
+#define SMC_NUM_PARAMS (SMC_NUM_ARGS - 1)
+
+#define SMC_IS_FASTCALL(smc_nr) ((smc_nr) & 0x80000000)
+#define SMC_IS_SMC64(smc_nr) ((smc_nr) & 0x40000000)
+#define SMC_ENTITY(smc_nr) (((smc_nr) & 0x3F000000) >> 24)
+#define SMC_FUNCTION(smc_nr) ((smc_nr) & 0x0000FFFF)
+
+#define SMC_NR(entity, fn, fastcall, smc64) ((((fastcall) & 0x1) << 31) | \
+ (((smc64) & 0x1) << 30) | \
+ (((entity) & 0x3F) << 24) | \
+ ((fn) & 0xFFFF) \
+ )
+
+#define SMC_FASTCALL_NR(entity, fn) SMC_NR((entity), (fn), 1, 0)
+#define SMC_STDCALL_NR(entity, fn) SMC_NR((entity), (fn), 0, 0)
+#define SMC_FASTCALL64_NR(entity, fn) SMC_NR((entity), (fn), 1, 1)
+#define SMC_STDCALL64_NR(entity, fn) SMC_NR((entity), (fn), 0, 1)
+
+#define SMC_ENTITY_ARCH 0 /* ARM Architecture calls */
+#define SMC_ENTITY_CPU 1 /* CPU Service calls */
+#define SMC_ENTITY_SIP 2 /* SIP Service calls */
+#define SMC_ENTITY_OEM 3 /* OEM Service calls */
+#define SMC_ENTITY_STD 4 /* Standard Service calls */
+#define SMC_ENTITY_RESERVED 5 /* Reserved for future use */
+#define SMC_ENTITY_TRUSTED_APP 48 /* Trusted Application calls */
+#define SMC_ENTITY_TRUSTED_OS 50 /* Trusted OS calls */
+#define SMC_ENTITY_LOGGING 51 /* Used for secure -> nonsecure logging */
+#define SMC_ENTITY_SECURE_MONITOR 60 /* Trusted OS calls internal to secure monitor */
+
+/* FC = Fast call, SC = Standard call */
+#define SMC_SC_RESTART_LAST SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_SC_NOP SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1)
+
+/*
+ * Return from secure os to non-secure os with return value in r1
+ */
+#define SMC_SC_NS_RETURN SMC_STDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0)
+
+#define SMC_FC_RESERVED SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_FC_FIQ_EXIT SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1)
+#define SMC_FC_REQUEST_FIQ SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2)
+#define SMC_FC_GET_NEXT_IRQ SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3)
+#define SMC_FC_FIQ_ENTER SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 4)
+
+#define SMC_FC64_SET_FIQ_HANDLER SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 5)
+#define SMC_FC64_GET_FIQ_REGS SMC_FASTCALL64_NR (SMC_ENTITY_SECURE_MONITOR, 6)
+
+#define SMC_FC_CPU_SUSPEND SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 7)
+#define SMC_FC_CPU_RESUME SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 8)
+
+#define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 9)
+#define SMC_FC_GET_VERSION_STR SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 10)
+
+/* Trusted OS entity calls */
+#define SMC_SC_VIRTIO_GET_DESCR SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20)
+#define SMC_SC_VIRTIO_START SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21)
+#define SMC_SC_VIRTIO_STOP SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22)
+
+#define SMC_SC_VDEV_RESET SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23)
+#define SMC_SC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24)
+
+#endif /* __LIB_SM_SMCALL_H */
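
To illustrate the encoding, a hypothetical host-side sanity check (not part
of the patch) for one of the fast calls defined above:

    #include <assert.h>
    #include <stdint.h>
    #include "smcall.h"

    int main(void)
    {
        /*
         * SMC_FC_FIQ_ENTER = SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 4):
         *   bit 31 (fast call)      : 0x80000000
         *   bit 30 (SMC64)          : 0
         *   entity 60 (0x3C) << 24  : 0x3C000000
         *   function number         : 0x00000004
         *                          ==> 0xBC000004
         */
        assert((uint32_t)SMC_FC_FIQ_ENTER == 0xBC000004u);
        assert(SMC_IS_FASTCALL(SMC_FC_FIQ_ENTER));
        assert(!SMC_IS_SMC64(SMC_FC_FIQ_ENTER));
        assert(SMC_ENTITY(SMC_FC_FIQ_ENTER) == SMC_ENTITY_SECURE_MONITOR);
        assert(SMC_FUNCTION(SMC_FC_FIQ_ENTER) == 4);
        return 0;
    }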
diff --git a/services/spd/trusty/trusty.c b/services/spd/trusty/trusty.c
new file mode 100644
index 0000000..4962d44
--- /dev/null
+++ b/services/spd/trusty/trusty.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <bl_common.h>
+#include <bl31.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <string.h>
+
+#include "smcall.h"
+#include "sm_err.h"
+
+struct trusty_stack {
+ uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
+};
+
+struct trusty_cpu_ctx {
+ cpu_context_t cpu_ctx;
+ void *saved_sp;
+ uint32_t saved_security_state;
+ int fiq_handler_active;
+ uint64_t fiq_handler_pc;
+ uint64_t fiq_handler_cpsr;
+ uint64_t fiq_handler_sp;
+ uint64_t fiq_pc;
+ uint64_t fiq_cpsr;
+ uint64_t fiq_sp_el1;
+ gp_regs_t fiq_gpregs;
+ struct trusty_stack secure_stack;
+};
+
+struct args {
+ uint64_t r0;
+ uint64_t r1;
+ uint64_t r2;
+ uint64_t r3;
+};
+
+struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];
+
+struct args trusty_init_context_stack(void **sp, void *new_stack);
+struct args trusty_context_switch_helper(void **sp, uint64_t r0, uint64_t r1,
+ uint64_t r2, uint64_t r3);
+
+static struct trusty_cpu_ctx *get_trusty_ctx(void)
+{
+ return &trusty_cpu_ctx[plat_my_core_pos()];
+}
+
+static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
+ uint64_t r1, uint64_t r2, uint64_t r3)
+{
+ struct args ret;
+ struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+ assert(ctx->saved_security_state != security_state);
+
+ cm_el1_sysregs_context_save(security_state);
+
+ ctx->saved_security_state = security_state;
+ ret = trusty_context_switch_helper(&ctx->saved_sp, r0, r1, r2, r3);
+
+ assert(ctx->saved_security_state == !security_state);
+
+ cm_el1_sysregs_context_restore(security_state);
+ cm_set_next_eret_context(security_state);
+
+ return ret;
+}
+
+static uint64_t trusty_fiq_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ struct args ret;
+ struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+ assert(!is_caller_secure(flags));
+
+ ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_ENTER, 0, 0, 0);
+ if (ret.r0) {
+ SMC_RET0(handle);
+ }
+
+ if (ctx->fiq_handler_active) {
+ INFO("%s: fiq handler already active\n", __func__);
+ SMC_RET0(handle);
+ }
+
+ ctx->fiq_handler_active = 1;
+ memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
+ ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
+ ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
+ ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);
+
+ write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
+ cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, ctx->fiq_handler_cpsr);
+
+ SMC_RET0(handle);
+}
+
+static uint64_t trusty_set_fiq_handler(void *handle, uint64_t cpu,
+ uint64_t handler, uint64_t stack)
+{
+ struct trusty_cpu_ctx *ctx;
+
+ if (cpu >= PLATFORM_CORE_COUNT) {
+ ERROR("%s: cpu %ld >= %d\n", __func__, cpu, PLATFORM_CORE_COUNT);
+ return SM_ERR_INVALID_PARAMETERS;
+ }
+
+ ctx = &trusty_cpu_ctx[cpu];
+ ctx->fiq_handler_pc = handler;
+ ctx->fiq_handler_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
+ ctx->fiq_handler_sp = stack;
+
+ SMC_RET1(handle, 0);
+}
+
+static uint64_t trusty_get_fiq_regs(void *handle)
+{
+ struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+ uint64_t sp_el0 = read_ctx_reg(&ctx->fiq_gpregs, CTX_GPREG_SP_EL0);
+
+ SMC_RET4(handle, ctx->fiq_pc, ctx->fiq_cpsr, sp_el0, ctx->fiq_sp_el1);
+}
+
+static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t x3)
+{
+ struct args ret;
+ struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+ if (!ctx->fiq_handler_active) {
+ NOTICE("%s: fiq handler not active\n", __func__);
+ SMC_RET1(handle, SM_ERR_INVALID_PARAMETERS);
+ }
+
+ ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_EXIT, 0, 0, 0);
+ if (ret.r0 != 1) {
+ INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %ld\n",
+ __func__, handle, ret.r0);
+ }
+
+ /*
+ * Restore register state to state recorded on fiq entry.
+ *
+ * x0, sp_el1, pc and cpsr need to be restored because el1 cannot
+ * restore them.
+ *
+ * x1-x4 and x8-x17 need to be restored here because smc_handler64
+ * corrupts them (el1 code also restored them).
+ */
+ memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
+ ctx->fiq_handler_active = 0;
+ write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
+ cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, ctx->fiq_cpsr);
+
+ SMC_RET0(handle);
+}
+
+static uint64_t trusty_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct args ret;
+
+ if (is_caller_secure(flags)) {
+ if (smc_fid == SMC_SC_NS_RETURN) {
+ ret = trusty_context_switch(SECURE, x1, 0, 0, 0);
+ SMC_RET4(handle, ret.r0, ret.r1, ret.r2, ret.r3);
+ }
+		INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) "
+		     "cpu %d, unknown smc\n",
+ __func__, smc_fid, x1, x2, x3, x4, cookie, handle, flags,
+ plat_my_core_pos());
+ SMC_RET1(handle, SMC_UNK);
+ } else {
+ switch (smc_fid) {
+ case SMC_FC64_SET_FIQ_HANDLER:
+ return trusty_set_fiq_handler(handle, x1, x2, x3);
+ case SMC_FC64_GET_FIQ_REGS:
+ return trusty_get_fiq_regs(handle);
+ case SMC_FC_FIQ_EXIT:
+ return trusty_fiq_exit(handle, x1, x2, x3);
+ default:
+ ret = trusty_context_switch(NON_SECURE, smc_fid, x1,
+ x2, x3);
+ SMC_RET1(handle, ret.r0);
+ }
+ }
+}
+
+static int32_t trusty_init(void)
+{
+ void el3_exit();
+ entry_point_info_t *ep_info;
+ struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+ uint32_t cpu = plat_my_core_pos();
+ int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
+ CTX_SPSR_EL3));
+
+ ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ cm_set_context(&ctx->cpu_ctx, SECURE);
+ cm_init_my_context(ep_info);
+
+ /*
+ * Adjust secondary cpu entry point for 32 bit images to the
+	 * end of exception vectors
+ */
+ if ((cpu != 0) && (reg_width == MODE_RW_32)) {
+ INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
+ cpu, ep_info->pc + (1U << 5));
+ cm_set_elr_el3(SECURE, ep_info->pc + (1U << 5));
+ }
+
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ ctx->saved_security_state = ~0; /* initial saved state is invalid */
+ trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack);
+
+ trusty_context_switch_helper(&ctx->saved_sp, 0, 0, 0, 0);
+
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ return 0;
+}
+
+static void trusty_cpu_suspend(void)
+{
+ struct args ret;
+ unsigned int linear_id = plat_my_core_pos();
+
+ ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_SUSPEND, 0, 0, 0);
+ if (ret.r0 != 0) {
+ INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %ld\n",
+ __func__, linear_id, ret.r0);
+ }
+}
+
+static void trusty_cpu_resume(void)
+{
+ struct args ret;
+ unsigned int linear_id = plat_my_core_pos();
+
+ ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_RESUME, 0, 0, 0);
+ if (ret.r0 != 0) {
+ INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %ld\n",
+ __func__, linear_id, ret.r0);
+ }
+}
+
+static int32_t trusty_cpu_off_handler(uint64_t unused)
+{
+ trusty_cpu_suspend();
+
+ return 0;
+}
+
+static void trusty_cpu_on_finish_handler(uint64_t unused)
+{
+ struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+ if (!ctx->saved_sp) {
+ trusty_init();
+ } else {
+ trusty_cpu_resume();
+ }
+}
+
+static void trusty_cpu_suspend_handler(uint64_t unused)
+{
+ trusty_cpu_suspend();
+}
+
+static void trusty_cpu_suspend_finish_handler(uint64_t unused)
+{
+ trusty_cpu_resume();
+}
+
+static const spd_pm_ops_t trusty_pm = {
+ .svc_off = trusty_cpu_off_handler,
+ .svc_suspend = trusty_cpu_suspend_handler,
+ .svc_on_finish = trusty_cpu_on_finish_handler,
+ .svc_suspend_finish = trusty_cpu_suspend_finish_handler,
+};
+
+static int32_t trusty_setup(void)
+{
+ entry_point_info_t *ep_info;
+ uint32_t instr;
+ uint32_t flags;
+ int ret;
+ int aarch32 = 0;
+
+ ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (!ep_info) {
+ INFO("Trusty image missing.\n");
+ return -1;
+ }
+
+ instr = *(uint32_t *)ep_info->pc;
+
+ if (instr >> 24 == 0xea) {
+ INFO("trusty: Found 32 bit image\n");
+ aarch32 = 1;
+ } else if (instr >> 8 == 0xd53810) {
+ INFO("trusty: Found 64 bit image\n");
+ } else {
+ INFO("trusty: Found unknown image, 0x%x\n", instr);
+ }
+
+ SET_PARAM_HEAD(ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
+ if (!aarch32)
+ ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+ else
+ ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+ SPSR_E_LITTLE,
+ DAIF_FIQ_BIT |
+ DAIF_IRQ_BIT |
+ DAIF_ABT_BIT);
+
+ bl31_register_bl32_init(trusty_init);
+
+ psci_register_spd_pm_hook(&trusty_pm);
+
+ flags = 0;
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ trusty_fiq_handler,
+ flags);
+ if (ret)
+ ERROR("trusty: failed to register fiq handler, ret = %d\n", ret);
+
+ return 0;
+}
+
+/* Define a SPD runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ trusty_fast,
+
+ OEN_TOS_START,
+ SMC_ENTITY_SECURE_MONITOR,
+ SMC_TYPE_FAST,
+ trusty_setup,
+ trusty_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for standard SMC calls */
+DECLARE_RT_SVC(
+ trusty_std,
+
+ OEN_TOS_START,
+ SMC_ENTITY_SECURE_MONITOR,
+ SMC_TYPE_STD,
+ NULL,
+ trusty_smc_handler
+);
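
The FIQ handoff implemented by trusty_fiq_handler() and trusty_fiq_exit()
above implies the following non-secure sequence; a sketch with hypothetical
names (ns_fiq_entry, fiq_stack_top and the smc64() wrapper are not part of
the patch):

    /* Registered per cpu; entered at fiq_handler_pc on fiq_handler_sp */
    void ns_fiq_entry(void)
    {
        /* ... service the interrupt source ... */

        /* Unwind to the NS state stashed by trusty_fiq_handler() */
        smc64(SMC_FC_FIQ_EXIT, 0, 0, 0);
    }

    void ns_setup(uint64_t cpu, uint64_t fiq_stack_top)
    {
        smc64(SMC_FC64_SET_FIQ_HANDLER, cpu,
              (uint64_t)ns_fiq_entry, fiq_stack_top);
    }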
diff --git a/services/spd/trusty/trusty.mk b/services/spd/trusty/trusty.mk
new file mode 100644
index 0000000..9f53515
--- /dev/null
+++ b/services/spd/trusty/trusty.mk
@@ -0,0 +1,34 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPD_INCLUDES :=
+
+SPD_SOURCES := services/spd/trusty/trusty.c \
+ services/spd/trusty/trusty_helpers.S
diff --git a/services/spd/trusty/trusty_helpers.S b/services/spd/trusty/trusty_helpers.S
new file mode 100644
index 0000000..9bbb044
--- /dev/null
+++ b/services/spd/trusty/trusty_helpers.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+
+.macro push ra, rb, sp=sp
+ stp \ra, \rb, [\sp,#-16]!
+.endm
+
+.macro pop ra, rb, sp=sp
+ ldp \ra, \rb, [\sp], #16
+.endm
+
+ .global trusty_context_switch_helper
+func trusty_context_switch_helper
+ push x8, xzr
+ push x19, x20
+ push x21, x22
+ push x23, x24
+ push x25, x26
+ push x27, x28
+ push x29, x30
+
+ mov x9, sp
+ ldr x10, [x0]
+ mov sp, x10
+ str x9, [x0]
+
+ pop x29, x30
+ pop x27, x28
+ pop x25, x26
+ pop x23, x24
+ pop x21, x22
+ pop x19, x20
+ pop x8, xzr
+ stp x1, x2, [x8]
+ stp x3, x4, [x8, #16]
+
+ ret
+endfunc trusty_context_switch_helper
+
+ .global trusty_init_context_stack
+func trusty_init_context_stack
+ push x8, xzr, x1
+ push xzr, xzr, x1
+ push xzr, xzr, x1
+ push xzr, xzr, x1
+ push xzr, xzr, x1
+ push xzr, xzr, x1
+ adr x9, el3_exit
+ push xzr, x9, x1
+ str x1, [x0]
+ ret
+endfunc trusty_init_context_stack
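
For reference, the initial frame built here, as consumed by the pop sequence
in trusty_context_switch_helper() above (seven 16-byte slots; the last push
lands at the lowest address):

    /*
     * x1 + 0x60       : x8, xzr        popped last -> x8 = struct-return ptr
     * x1 + 0x10..0x50 : five zero pairs, popped into x19..x28 = 0
     * x1 + 0x00       : xzr, el3_exit  popped first -> x29 = 0, x30 = el3_exit
     *
     * The final `ret` in the helper therefore lands in el3_exit, which
     * ERETs into the Trusty entry point programmed by trusty_init().
     */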