Merge pull request #762 from douglas-raillard-arm/dr/doc_build_info

Clarify dependency for PSCI_EXTENDED_STATE_ID
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index f333bf1..220d1cc 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -38,10 +38,10 @@
 
 	.globl	runtime_exceptions
 
-	/* -----------------------------------------------------
-	 * Handle SMC exceptions separately from other sync.
-	 * exceptions.
-	 * -----------------------------------------------------
+	/* ---------------------------------------------------------------------
+	 * This macro handles Synchronous exceptions.
+	 * Only SMC exceptions are supported.
+	 * ---------------------------------------------------------------------
 	 */
 	.macro	handle_sync_exception
 	/* Enable the SError interrupt */
@@ -50,11 +50,10 @@
 	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 
 #if ENABLE_RUNTIME_INSTRUMENTATION
-
 	/*
-	 * Read the timestamp value and store it in per-cpu data.
-	 * The value will be extracted from per-cpu data by the
-	 * C level SMC handler and saved to the PMF timestamp region.
+	 * Read the timestamp value and store it in per-cpu data. The value
+	 * will be extracted from per-cpu data by the C level SMC handler and
+	 * saved to the PMF timestamp region.
 	 */
 	mrs	x30, cntpct_el0
 	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
@@ -66,26 +65,22 @@
 	mrs	x30, esr_el3
 	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
 
+	/* Handle SMC exceptions separately from other synchronous exceptions */
 	cmp	x30, #EC_AARCH32_SMC
 	b.eq	smc_handler32
 
 	cmp	x30, #EC_AARCH64_SMC
 	b.eq	smc_handler64
 
-	/* -----------------------------------------------------
-	 * The following code handles any synchronous exception
-	 * that is not an SMC.
-	 * -----------------------------------------------------
-	 */
-
+	/* Other kinds of synchronous exceptions are not handled */
 	bl	report_unhandled_exception
 	.endm
 
 
-	/* -----------------------------------------------------
-	 * This macro handles FIQ or IRQ interrupts i.e. EL3,
-	 * S-EL1 and NS interrupts.
-	 * -----------------------------------------------------
+	/* ---------------------------------------------------------------------
+	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
+	 * interrupts.
+	 * ---------------------------------------------------------------------
 	 */
 	.macro	handle_interrupt_exception label
 	/* Enable the SError interrupt */
@@ -94,10 +89,7 @@
 	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 	bl	save_gp_registers
 
-	/*
-	 * Save the EL3 system registers needed to return from
-	 * this exception.
-	 */
+	/* Save the EL3 system registers needed to return from this exception */
 	mrs	x0, spsr_el3
 	mrs	x1, elr_el3
 	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
@@ -109,36 +101,34 @@
 	mov	sp, x2
 
 	/*
-	 * Find out whether this is a valid interrupt type. If the
-	 * interrupt controller reports a spurious interrupt then
-	 * return to where we came from.
+	 * Find out whether this is a valid interrupt type.
+	 * If the interrupt controller reports a spurious interrupt then return
+	 * to where we came from.
 	 */
 	bl	plat_ic_get_pending_interrupt_type
 	cmp	x0, #INTR_TYPE_INVAL
 	b.eq	interrupt_exit_\label
 
 	/*
-	 * Get the registered handler for this interrupt type. A
-	 * NULL return value could be 'cause of the following
-	 * conditions:
+	 * Get the registered handler for this interrupt type.
+	 * A NULL return value could be because of the following conditions:
 	 *
-	 * a. An interrupt of a type was routed correctly but a
-	 *    handler for its type was not registered.
+	 * a. An interrupt of a type was routed correctly but a handler for its
+	 *    type was not registered.
 	 *
-	 * b. An interrupt of a type was not routed correctly so
-	 *    a handler for its type was not registered.
+	 * b. An interrupt of a type was not routed correctly so a handler for
+	 *    its type was not registered.
 	 *
-	 * c. An interrupt of a type was routed correctly to EL3,
-	 *    but was deasserted before its pending state could
-	 *    be read. Another interrupt of a different type pended
-	 *    at the same time and its type was reported as pending
-	 *    instead. However, a handler for this type was not
-	 *    registered.
+	 * c. An interrupt of a type was routed correctly to EL3, but was
+	 *    deasserted before its pending state could be read. Another
+	 *    interrupt of a different type pended at the same time and its
+	 *    type was reported as pending instead. However, a handler for this
+	 *    type was not registered.
 	 *
-	 * a. and b. can only happen due to a programming error.
-	 * The occurrence of c. could be beyond the control of
-	 * Trusted Firmware. It makes sense to return from this
-	 * exception instead of reporting an error.
+	 * a. and b. can only happen due to a programming error. The
+	 * occurrence of c. could be beyond the control of Trusted Firmware.
+	 * It makes sense to return from this exception instead of reporting an
+	 * error.
 	 */
 	bl	get_interrupt_type_handler
 	cbz	x0, interrupt_exit_\label
@@ -153,7 +143,7 @@
 	/* Restore the reference to the 'handle' i.e. SP_EL3 */
 	mov	x2, x20
 
-	/*  x3 will point to a cookie (not used now) */
+	/* x3 will point to a cookie (not used now) */
 	mov	x3, xzr
 
 	/* Call the interrupt type handler */
@@ -180,24 +170,20 @@
 
 vector_base runtime_exceptions
 
-	/* -----------------------------------------------------
-	 * Current EL with _sp_el0 : 0x0 - 0x200
-	 * -----------------------------------------------------
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200
+	 * ---------------------------------------------------------------------
 	 */
 vector_entry sync_exception_sp_el0
-	/* -----------------------------------------------------
-	 * We don't expect any synchronous exceptions from EL3
-	 * -----------------------------------------------------
-	 */
+	/* We don't expect any synchronous exceptions from EL3 */
 	bl	report_unhandled_exception
 	check_vector_size sync_exception_sp_el0
 
-	/* -----------------------------------------------------
-	 * EL3 code is non-reentrant. Any asynchronous exception
-	 * is a serious error. Loop infinitely.
-	 * -----------------------------------------------------
-	 */
 vector_entry irq_sp_el0
+	/*
+	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
+	 * error. Loop infinitely.
+	 */
 	bl	report_unhandled_interrupt
 	check_vector_size irq_sp_el0
 
@@ -211,18 +197,16 @@
 	bl	report_unhandled_exception
 	check_vector_size serror_sp_el0
 
-	/* -----------------------------------------------------
-	 * Current EL with SPx: 0x200 - 0x400
-	 * -----------------------------------------------------
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400
+	 * ---------------------------------------------------------------------
 	 */
-
 vector_entry sync_exception_sp_elx
-	/* -----------------------------------------------------
-	 * This exception will trigger if anything went wrong
-	 * during a previous exception entry or exit or while
-	 * handling an earlier unexpected synchronous exception.
-	 * There is a high probability that SP_EL3 is corrupted.
-	 * -----------------------------------------------------
+	/*
+	 * This exception will trigger if anything went wrong during a previous
+	 * exception entry or exit or while handling an earlier unexpected
+	 * synchronous exception. There is a high probability that SP_EL3 is
+	 * corrupted.
 	 */
 	bl	report_unhandled_exception
 	check_vector_size sync_exception_sp_elx
@@ -239,27 +223,20 @@
 	bl	report_unhandled_exception
 	check_vector_size serror_sp_elx
 
-	/* -----------------------------------------------------
+	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch64 : 0x400 - 0x600
-	 * -----------------------------------------------------
+	 * ---------------------------------------------------------------------
 	 */
 vector_entry sync_exception_aarch64
-	/* -----------------------------------------------------
-	 * This exception vector will be the entry point for
-	 * SMCs and traps that are unhandled at lower ELs most
-	 * commonly. SP_EL3 should point to a valid cpu context
-	 * where the general purpose and system register state
-	 * can be saved.
-	 * -----------------------------------------------------
+	/*
+	 * This exception vector will be the entry point for SMCs and traps
+	 * that are unhandled at lower ELs most commonly. SP_EL3 should point
+	 * to a valid cpu context where the general purpose and system register
+	 * state can be saved.
 	 */
 	handle_sync_exception
 	check_vector_size sync_exception_aarch64
 
-	/* -----------------------------------------------------
-	 * Asynchronous exceptions from lower ELs are not
-	 * currently supported. Report their occurrence.
-	 * -----------------------------------------------------
-	 */
 vector_entry irq_aarch64
 	handle_interrupt_exception irq_aarch64
 	check_vector_size irq_aarch64
@@ -269,30 +246,27 @@
 	check_vector_size fiq_aarch64
 
 vector_entry serror_aarch64
+	/*
+	 * SError exceptions from lower ELs are not currently supported.
+	 * Report their occurrence.
+	 */
 	bl	report_unhandled_exception
 	check_vector_size serror_aarch64
 
-	/* -----------------------------------------------------
+	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch32 : 0x600 - 0x800
-	 * -----------------------------------------------------
+	 * ---------------------------------------------------------------------
 	 */
 vector_entry sync_exception_aarch32
-	/* -----------------------------------------------------
-	 * This exception vector will be the entry point for
-	 * SMCs and traps that are unhandled at lower ELs most
-	 * commonly. SP_EL3 should point to a valid cpu context
-	 * where the general purpose and system register state
-	 * can be saved.
-	 * -----------------------------------------------------
+	/*
+	 * This exception vector will be the entry point for SMCs and traps
+	 * that are unhandled at lower ELs most commonly. SP_EL3 should point
+	 * to a valid cpu context where the general purpose and system register
+	 * state can be saved.
 	 */
 	handle_sync_exception
 	check_vector_size sync_exception_aarch32
 
-	/* -----------------------------------------------------
-	 * Asynchronous exceptions from lower ELs are not
-	 * currently supported. Report their occurrence.
-	 * -----------------------------------------------------
-	 */
 vector_entry irq_aarch32
 	handle_interrupt_exception irq_aarch32
 	check_vector_size irq_aarch32
@@ -302,34 +276,34 @@
 	check_vector_size fiq_aarch32
 
 vector_entry serror_aarch32
+	/*
+	 * SError exceptions from lower ELs are not currently supported.
+	 * Report their occurrence.
+	 */
 	bl	report_unhandled_exception
 	check_vector_size serror_aarch32
 
 
-	/* -----------------------------------------------------
+	/* ---------------------------------------------------------------------
 	 * The following code handles secure monitor calls.
-	 * Depending upon the execution state from where the SMC
-	 * has been invoked, it frees some general purpose
-	 * registers to perform the remaining tasks. They
-	 * involve finding the runtime service handler that is
-	 * the target of the SMC & switching to runtime stacks
-	 * (SP_EL0) before calling the handler.
+	 * Depending upon the execution state from where the SMC has been
+	 * invoked, it frees some general purpose registers to perform the
+	 * remaining tasks. They involve finding the runtime service handler
+	 * that is the target of the SMC & switching to runtime stacks (SP_EL0)
+	 * before calling the handler.
 	 *
-	 * Note that x30 has been explicitly saved and can be
-	 * used here
-	 * -----------------------------------------------------
+	 * Note that x30 has been explicitly saved and can be used here
+	 * ---------------------------------------------------------------------
 	 */
 func smc_handler
 smc_handler32:
 	/* Check whether aarch32 issued an SMC64 */
 	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited
 
-	/* -----------------------------------------------------
-	 * Since we're are coming from aarch32, x8-x18 need to
-	 * be saved as per SMC32 calling convention. If a lower
-	 * EL in aarch64 is making an SMC32 call then it must
-	 * have saved x8-x17 already therein.
-	 * -----------------------------------------------------
+	/*
+	 * Since we're coming from aarch32, x8-x18 need to be saved as per
+	 * SMC32 calling convention. If a lower EL in aarch64 is making an
+	 * SMC32 call then it must have saved x8-x17 already therein.
 	 */
 	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
 	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
@@ -340,15 +314,14 @@
 	/* x4-x7, x18, sp_el0 are saved below */
 
 smc_handler64:
-	/* -----------------------------------------------------
-	 * Populate the parameters for the SMC handler. We
-	 * already have x0-x4 in place. x5 will point to a
-	 * cookie (not used now). x6 will point to the context
-	 * structure (SP_EL3) and x7 will contain flags we need
-	 * to pass to the handler Hence save x5-x7. Note that x4
-	 * only needs to be preserved for AArch32 callers but we
-	 * do it for AArch64 callers as well for convenience
-	 * -----------------------------------------------------
+	/*
+	 * Populate the parameters for the SMC handler.
+	 * We already have x0-x4 in place. x5 will point to a cookie (not used
+	 * now). x6 will point to the context structure (SP_EL3) and x7 will
+	 * contain flags we need to pass to the handler. Hence save x5-x7.
+	 *
+	 * Note: x4 only needs to be preserved for AArch32 callers but we do it
+	 *       for AArch64 callers as well for convenience
 	 */
 	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
 	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
@@ -370,12 +343,10 @@
 	adr	x14, rt_svc_descs_indices
 	ldrb	w15, [x14, x16]
 
-	/* -----------------------------------------------------
-	 * Restore the saved C runtime stack value which will
-	 * become the new SP_EL0 i.e. EL3 runtime stack. It was
-	 * saved in the 'cpu_context' structure prior to the last
-	 * ERET from EL3.
-	 * -----------------------------------------------------
+	/*
+	 * Restore the saved C runtime stack value which will become the new
+	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
+	 * structure prior to the last ERET from EL3.
 	 */
 	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
 
@@ -388,22 +359,19 @@
 	/* Switch to SP_EL0 */
 	msr	spsel, #0
 
-	/* -----------------------------------------------------
+	/*
 	 * Get the descriptor using the index
 	 * x11 = (base + off), x15 = index
 	 *
 	 * handler = (base + off) + (index << log2(size))
-	 * -----------------------------------------------------
 	 */
 	lsl	w10, w15, #RT_SVC_SIZE_LOG2
 	ldr	x15, [x11, w10, uxtw]
 
-	/* -----------------------------------------------------
-	 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there
-	 * is a world switch during SMC handling.
-	 * TODO: Revisit if all system registers can be saved
-	 * later.
-	 * -----------------------------------------------------
+	/*
+	 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
+	 * switch during SMC handling.
+	 * TODO: Revisit if all system registers can be saved later.
 	 */
 	mrs	x16, spsr_el3
 	mrs	x17, elr_el3
@@ -416,12 +384,10 @@
 
 	mov	sp, x12
 
-	/* -----------------------------------------------------
-	 * Call the Secure Monitor Call handler and then drop
-	 * directly into el3_exit() which will program any
-	 * remaining architectural state prior to issuing the
-	 * ERET to the desired lower EL.
-	 * -----------------------------------------------------
+	/*
+	 * Call the Secure Monitor Call handler and then drop directly into
+	 * el3_exit() which will program any remaining architectural state
+	 * prior to issuing the ERET to the desired lower EL.
 	 */
 #if DEBUG
 	cbz	x15, rt_svc_fw_critical_error
@@ -436,7 +402,7 @@
 	 * callers will find the registers contents unchanged, but AArch64
 	 * callers will find the registers modified (with stale earlier NS
 	 * content). Either way, we aren't leaking any secure information
-	 * through them
+	 * through them.
 	 */
 	mov	w0, #SMC_UNK
 	b	restore_gp_registers_callee_eret
@@ -447,6 +413,7 @@
 	eret
 
 rt_svc_fw_critical_error:
-	msr	spsel, #1 /* Switch to SP_ELx */
+	/* Switch to SP_ELx */
+	msr	spsel, #1
 	bl	report_unhandled_exception
 endfunc smc_handler
diff --git a/docs/interrupt-framework-design.md b/docs/interrupt-framework-design.md
index e50d175..b468949 100644
--- a/docs/interrupt-framework-design.md
+++ b/docs/interrupt-framework-design.md
@@ -335,9 +335,9 @@
 This component declares the following prototype for a handler of an interrupt type.
 
         typedef uint64_t (*interrupt_type_handler_t)(uint32_t id,
-					     uint32_t flags,
-					     void *handle,
-					     void *cookie);
+                                                     uint32_t flags,
+                                                     void *handle,
+                                                     void *cookie);
 
 The `id` is parameter is reserved and could be used in the future for passing
 the interrupt id of the highest pending interrupt only if there is a foolproof
@@ -358,10 +358,16 @@
 for the security state specified in the `flags` parameter.
 
 Once the handler routine completes, execution will return to either the secure
-or non-secure state. The handler routine should return a pointer to
-`cpu_context` structure of the current CPU for the target security state. It
-should treat all error conditions as critical errors and take appropriate action
-within its implementation e.g. use assertion failures.
+or non-secure state. The handler routine must return a pointer to
+`cpu_context` structure of the current CPU for the target security state. On
+AArch64, this return value is currently ignored by the caller as the
+appropriate `cpu_context` to be used is expected to be set by the handler
+via the context management library APIs.
+A portable interrupt handler implementation must set the target context both in
+the structure pointed to by the returned pointer and via the context management
+library APIs. The handler should treat all error conditions as critical errors
+and take appropriate action within its implementation e.g. use assertion
+failures.
 
 The runtime firmware provides the following API for registering a handler for a
 particular type of interrupt. A Secure Payload Dispatcher service should use
@@ -370,8 +376,8 @@
 the type of interrupt.
 
     int32_t register_interrupt_type_handler(uint32_t type,
-					interrupt_type_handler handler,
-					uint64_t flags);
+                                            interrupt_type_handler handler,
+                                            uint64_t flags);
 
 
 The `type` parameter can be one of the three interrupt types listed above i.e.
@@ -962,13 +968,13 @@
 secure software sequence for issuing a `standard` SMC would look like this,
 assuming `P.STATE.I=0` in the non secure state :
 
-	int rc;
-	rc = smc(TSP_STD_SMC_FID, ...); 	/* Issue a Standard SMC call */
-        /* The pending non-secure interrupt is handled by the interrupt handler
-           and returns back here. */
-	while (rc == SMC_PREEMPTED) {		/* Check if the SMC call is preempted */
-	    rc = smc(TSP_FID_RESUME);		/* Issue resume SMC call */
-	}
+    int rc;
+    rc = smc(TSP_STD_SMC_FID, ...);     /* Issue a Standard SMC call */
+    /* The pending non-secure interrupt is handled by the interrupt handler
+       and returns back here. */
+    while (rc == SMC_PREEMPTED) {       /* Check if the SMC call is preempted */
+        rc = smc(TSP_FID_RESUME);       /* Issue resume SMC call */
+    }
 
 The `TSP_STD_SMC_FID` is any `standard` SMC function identifier and the smc()
 function invokes a SMC call with the required arguments. The pending non-secure
diff --git a/docs/spd/trusty-dispatcher.md b/docs/spd/trusty-dispatcher.md
new file mode 100644
index 0000000..0258959
--- /dev/null
+++ b/docs/spd/trusty-dispatcher.md
@@ -0,0 +1,15 @@
+Trusty Dispatcher
+=================
+Trusty is a set of software components, supporting a Trusted Execution
+Environment (TEE) on mobile devices, published and maintained by Google.
+
+Detailed information and build instructions can be found on the Android
+Open Source Project (AOSP) webpage for Trusty hosted at
+https://source.android.com/security/trusty
+
+Supported platforms
+===================
+Out of all the platforms supported by the ARM Trusted Firmware, Trusty is
+verified and supported on NVIDIA's Tegra SoCs.
+
+
diff --git a/plat/xilinx/zynqmp/aarch64/zynqmp_common.c b/plat/xilinx/zynqmp/aarch64/zynqmp_common.c
index 60a1605..8e461d3 100644
--- a/plat/xilinx/zynqmp/aarch64/zynqmp_common.c
+++ b/plat/xilinx/zynqmp/aarch64/zynqmp_common.c
@@ -49,11 +49,14 @@
 
 static unsigned int zynqmp_get_silicon_ver(void)
 {
-	unsigned int ver;
+	static unsigned int ver;
 
-	ver = mmio_read_32(ZYNQMP_CSU_BASEADDR + ZYNQMP_CSU_VERSION_OFFSET);
-	ver &= ZYNQMP_SILICON_VER_MASK;
-	ver >>= ZYNQMP_SILICON_VER_SHIFT;
+	if (!ver) {
+		ver = mmio_read_32(ZYNQMP_CSU_BASEADDR +
+				   ZYNQMP_CSU_VERSION_OFFSET);
+		ver &= ZYNQMP_SILICON_VER_MASK;
+		ver >>= ZYNQMP_SILICON_VER_SHIFT;
+	}
 
 	return ver;
 }
@@ -74,34 +77,6 @@
 	return 100000000;
 }
 
-static unsigned int zynqmp_get_system_timer_freq(void)
-{
-	unsigned int ver = zynqmp_get_silicon_ver();
-
-	switch (ver) {
-	case ZYNQMP_CSU_VERSION_VELOCE:
-		return 10000;
-	case ZYNQMP_CSU_VERSION_EP108:
-		return 4000000;
-	case ZYNQMP_CSU_VERSION_QEMU:
-		return 50000000;
-	}
-
-	return 100000000;
-}
-
-unsigned int zynqmp_get_silicon_id(void)
-{
-	uint32_t id;
-
-	id = mmio_read_32(ZYNQMP_CSU_BASEADDR + ZYNQMP_CSU_IDCODE_OFFSET);
-
-	id &= ZYNQMP_CSU_IDCODE_DEVICE_CODE_MASK | ZYNQMP_CSU_IDCODE_SVD_MASK;
-	id >>= ZYNQMP_CSU_IDCODE_SVD_SHIFT;
-
-	return id;
-}
-
 #if LOG_LEVEL >= LOG_LEVEL_NOTICE
 static const struct {
 	unsigned int id;
@@ -153,6 +128,18 @@
 	},
 };
 
+static unsigned int zynqmp_get_silicon_id(void)
+{
+	uint32_t id;
+
+	id = mmio_read_32(ZYNQMP_CSU_BASEADDR + ZYNQMP_CSU_IDCODE_OFFSET);
+
+	id &= ZYNQMP_CSU_IDCODE_DEVICE_CODE_MASK | ZYNQMP_CSU_IDCODE_SVD_MASK;
+	id >>= ZYNQMP_CSU_IDCODE_SVD_SHIFT;
+
+	return id;
+}
+
 static char *zynqmp_get_silicon_idcode_name(void)
 {
 	unsigned int id;
@@ -281,25 +268,21 @@
 {
 	zynqmp_discover_pmufw();
 	zynqmp_print_platform_name();
-
-	/* Global timer init - Program time stamp reference clk */
-	uint32_t val = mmio_read_32(CRL_APB_TIMESTAMP_REF_CTRL);
-	val |= CRL_APB_TIMESTAMP_REF_CTRL_CLKACT_BIT;
-	mmio_write_32(CRL_APB_TIMESTAMP_REF_CTRL, val);
-
-	/* Program freq register in System counter and enable system counter. */
-	mmio_write_32(IOU_SCNTRS_BASEFREQ, zynqmp_get_system_timer_freq());
-	mmio_write_32(IOU_SCNTRS_CONTROL, IOU_SCNTRS_CONTROL_EN);
-
 	generic_delay_timer_init();
 }
 
 unsigned int plat_get_syscnt_freq2(void)
 {
-	unsigned int counter_base_frequency;
+	unsigned int ver = zynqmp_get_silicon_ver();
 
-	/* FIXME: Read the frequency from Frequency modes table */
-	counter_base_frequency = zynqmp_get_system_timer_freq();
+	switch (ver) {
+	case ZYNQMP_CSU_VERSION_VELOCE:
+		return 10000;
+	case ZYNQMP_CSU_VERSION_EP108:
+		return 4000000;
+	case ZYNQMP_CSU_VERSION_QEMU:
+		return 50000000;
+	}
 
-	return counter_base_frequency;
+	return mmio_read_32(IOU_SCNTRS_BASEFREQ);
 }
diff --git a/plat/xilinx/zynqmp/include/platform_def.h b/plat/xilinx/zynqmp/include/platform_def.h
index 3c1a9e5..047aeaa 100644
--- a/plat/xilinx/zynqmp/include/platform_def.h
+++ b/plat/xilinx/zynqmp/include/platform_def.h
@@ -113,7 +113,6 @@
  * as Group 0 interrupts.
  */
 #define PLAT_ARM_G1S_IRQS	ARM_IRQ_SEC_PHY_TIMER,	\
-				IRQ_SEC_IPI_APU,	\
 				ARM_IRQ_SEC_SGI_0,	\
 				ARM_IRQ_SEC_SGI_1,	\
 				ARM_IRQ_SEC_SGI_2,	\
diff --git a/plat/xilinx/zynqmp/plat_psci.c b/plat/xilinx/zynqmp/plat_psci.c
index 55227ea..7f7e032 100644
--- a/plat/xilinx/zynqmp/plat_psci.c
+++ b/plat/xilinx/zynqmp/plat_psci.c
@@ -270,7 +270,8 @@
 	plat_arm_interconnect_exit_coherency();
 
 	/* Send the power down request to the PMU */
-	pm_system_shutdown(0);
+	pm_system_shutdown(PMF_SHUTDOWN_TYPE_SHUTDOWN,
+			   PMF_SHUTDOWN_SUBTYPE_SUBSYSTEM);
 
 	while (1)
 		wfi();
@@ -304,7 +305,8 @@
 	plat_arm_interconnect_exit_coherency();
 
 	/* Send the system reset request to the PMU */
-	pm_system_shutdown(1);
+	pm_system_shutdown(PMF_SHUTDOWN_TYPE_RESET,
+			   PMF_SHUTDOWN_SUBTYPE_SUBSYSTEM);
 
 	while (1)
 		wfi();
diff --git a/plat/xilinx/zynqmp/pm_service/pm_api_sys.c b/plat/xilinx/zynqmp/pm_service/pm_api_sys.c
index e859ee3..15e12fa 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_api_sys.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_api_sys.c
@@ -101,7 +101,7 @@
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD6(payload, PM_SELF_SUSPEND, proc->node_id, latency,
 			 state, address, (address >> 32));
-	return pm_ipi_send_sync(proc, payload, NULL);
+	return pm_ipi_send_sync(proc, payload, NULL, 0);
 }
 
 /**
@@ -123,7 +123,7 @@
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD5(payload, PM_REQ_SUSPEND, target, ack, latency, state);
 	if (ack == REQ_ACK_BLOCKING)
-		return pm_ipi_send_sync(primary_proc, payload, NULL);
+		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 	else
 		return pm_ipi_send(primary_proc, payload);
 }
@@ -165,7 +165,7 @@
 			 encoded_address >> 32, ack);
 
 	if (ack == REQ_ACK_BLOCKING)
-		return pm_ipi_send_sync(primary_proc, payload, NULL);
+		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 	else
 		return pm_ipi_send(primary_proc, payload);
 }
@@ -187,7 +187,7 @@
 	PM_PACK_PAYLOAD3(payload, PM_FORCE_POWERDOWN, target, ack);
 
 	if (ack == REQ_ACK_BLOCKING)
-		return pm_ipi_send_sync(primary_proc, payload, NULL);
+		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 	else
 		return pm_ipi_send(primary_proc, payload);
 }
@@ -243,11 +243,11 @@
  *
  * @return	Returns status, either success or error+reason
  */
-enum pm_ret_status pm_system_shutdown(unsigned int restart)
+enum pm_ret_status pm_system_shutdown(unsigned int type, unsigned int subtype)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 
-	PM_PACK_PAYLOAD2(payload, PM_SYSTEM_SHUTDOWN, restart);
+	PM_PACK_PAYLOAD3(payload, PM_SYSTEM_SHUTDOWN, type, subtype);
 	return pm_ipi_send(primary_proc, payload);
 }
 
@@ -272,7 +272,7 @@
 	PM_PACK_PAYLOAD5(payload, PM_REQ_NODE, nid, capabilities, qos, ack);
 
 	if (ack == REQ_ACK_BLOCKING)
-		return pm_ipi_send_sync(primary_proc, payload, NULL);
+		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 	else
 		return pm_ipi_send(primary_proc, payload);
 }
@@ -299,7 +299,7 @@
 			 ack);
 
 	if (ack == REQ_ACK_BLOCKING)
-		return pm_ipi_send_sync(primary_proc, payload, NULL);
+		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 	else
 		return pm_ipi_send(primary_proc, payload);
 }
@@ -348,7 +348,7 @@
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD1(payload, PM_GET_API_VERSION);
-	return pm_ipi_send_sync(primary_proc, payload, version);
+	return pm_ipi_send_sync(primary_proc, payload, version, 1);
 }
 
 /**
@@ -396,7 +396,7 @@
 	PM_PACK_PAYLOAD5(payload, PM_REGISTER_NOTIFIER,
 			 nid, event, wake, enable);
 
-	return pm_ipi_send(primary_proc, payload);
+	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
 
 /**
@@ -418,7 +418,7 @@
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD3(payload, PM_GET_OP_CHARACTERISTIC, nid, type);
-	return pm_ipi_send_sync(primary_proc, payload, result);
+	return pm_ipi_send_sync(primary_proc, payload, result, 1);
 }
 
 /* Direct-Control API functions */
@@ -454,7 +454,7 @@
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD2(payload, PM_RESET_GET_STATUS, reset);
-	return pm_ipi_send_sync(primary_proc, payload, reset_status);
+	return pm_ipi_send_sync(primary_proc, payload, reset_status, 1);
 }
 
 /**
@@ -476,7 +476,7 @@
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD4(payload, PM_MMIO_WRITE, address, mask, value);
-	return pm_ipi_send_sync(primary_proc, payload, NULL);
+	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 }
 
 /**
@@ -495,7 +495,7 @@
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD2(payload, PM_MMIO_READ, address);
-	return pm_ipi_send_sync(primary_proc, payload, value);
+	return pm_ipi_send_sync(primary_proc, payload, value, 1);
 }
 
 /**
@@ -539,5 +539,34 @@
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD1(payload, PM_FPGA_GET_STATUS);
-	return pm_ipi_send_sync(primary_proc, payload, value);
+	return pm_ipi_send_sync(primary_proc, payload, value, 1);
+}
+
+/**
+ * pm_get_chipid() - Read silicon ID registers
+ * @value       Buffer for return values. Must be large enough
+ *		to hold 8 bytes.
+ *
+ * @return      Returns silicon ID registers
+ */
+enum pm_ret_status pm_get_chipid(uint32_t *value)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD1(payload, PM_GET_CHIPID);
+	return pm_ipi_send_sync(primary_proc, payload, value, 2);
+}
+
+/**
+ * pm_get_callbackdata() - Read from IPI response buffer
+ * @data - array of PAYLOAD_ARG_CNT elements
+ *
+ * Read value from ipi buffer response buffer.
+ */
+void pm_get_callbackdata(uint32_t *data, size_t count)
+{
+
+	pm_ipi_buff_read_callb(data, count);
+	pm_ipi_irq_clear();
 }
diff --git a/plat/xilinx/zynqmp/pm_service/pm_api_sys.h b/plat/xilinx/zynqmp/pm_service/pm_api_sys.h
index 26d83e7..7e22948 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_api_sys.h
+++ b/plat/xilinx/zynqmp/pm_service/pm_api_sys.h
@@ -61,7 +61,7 @@
 					enum pm_node_id wkup_node,
 					unsigned int enable);
 
-enum pm_ret_status pm_system_shutdown(unsigned int restart);
+enum pm_ret_status pm_system_shutdown(unsigned int type, unsigned int subtype);
 
 enum pm_ret_status pm_init_suspend_cb(enum pm_suspend_reason reason,
 				      unsigned int latency,
@@ -115,4 +115,7 @@
 				uint32_t flags);
 enum pm_ret_status pm_fpga_get_status(unsigned int *value);
 
+enum pm_ret_status pm_get_chipid(uint32_t *value);
+void pm_get_callbackdata(uint32_t *data, size_t count);
+
 #endif /* _PM_API_SYS_H_ */
diff --git a/plat/xilinx/zynqmp/pm_service/pm_client.c b/plat/xilinx/zynqmp/pm_service/pm_client.c
index b77a1cf..e102b4f 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_client.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_client.c
@@ -40,7 +40,6 @@
 #include <bl_common.h>
 #include <mmio.h>
 #include <string.h>
-#include <utils.h>
 #include "pm_api_sys.h"
 #include "pm_client.h"
 #include "pm_ipi.h"
diff --git a/plat/xilinx/zynqmp/pm_service/pm_defs.h b/plat/xilinx/zynqmp/pm_service/pm_defs.h
index aec335a..0c071c1 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_defs.h
+++ b/plat/xilinx/zynqmp/pm_service/pm_defs.h
@@ -225,4 +225,15 @@
 	PM_BOOT_ERROR,
 };
 
+enum pm_shutdown_type {
+	PMF_SHUTDOWN_TYPE_SHUTDOWN,
+	PMF_SHUTDOWN_TYPE_RESET,
+};
+
+enum pm_shutdown_subtype {
+	PMF_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+	PMF_SHUTDOWN_SUBTYPE_PS_ONLY,
+	PMF_SHUTDOWN_SUBTYPE_SYSTEM,
+};
+
 #endif /* _PM_DEFS_H_ */
diff --git a/plat/xilinx/zynqmp/pm_service/pm_ipi.c b/plat/xilinx/zynqmp/pm_service/pm_ipi.c
index c3e7ccb..9148f90 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_ipi.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_ipi.c
@@ -56,6 +56,8 @@
 #define IPI_BUFFER_TARGET_PL_3_OFFSET	0x180U
 #define IPI_BUFFER_TARGET_PMU_OFFSET	0x1C0U
 
+#define IPI_BUFFER_MAX_WORDS	8
+
 #define IPI_BUFFER_REQ_OFFSET	0x0U
 #define IPI_BUFFER_RESP_OFFSET	0x20U
 
@@ -96,7 +98,6 @@
  *		Any other return value will cause the framework to ignore
  *		the service
  *
- * Enable interrupts at registered entrance in IPI peripheral
  * Called from pm_setup initialization function
  */
 int pm_ipi_init(void)
@@ -188,13 +189,15 @@
 /**
  * pm_ipi_buff_read() - Reads IPI response after PMU has handled interrupt
  * @proc	Pointer to the processor who is waiting and reading response
- * @value	Used to return value from 2nd IPI buffer element (optional)
+ * @value	Used to return value from IPI buffer element (optional)
+ * @count	Number of values to return in @value
  *
  * @return	Returns status, either success or error+reason
  */
 static enum pm_ret_status pm_ipi_buff_read(const struct pm_proc *proc,
-					   unsigned int *value)
+					   unsigned int *value, size_t count)
 {
+	size_t i;
 	uintptr_t buffer_base = proc->ipi->buffer_base +
 				IPI_BUFFER_TARGET_PMU_OFFSET +
 				IPI_BUFFER_RESP_OFFSET;
@@ -208,17 +211,43 @@
 	 * buf-2: unused
 	 * buf-3: unused
 	 */
-	if (value != NULL)
-		*value = mmio_read_32(buffer_base + PAYLOAD_ARG_SIZE);
+	for (i = 1; i <= count; i++) {
+		*value = mmio_read_32(buffer_base + (i * PAYLOAD_ARG_SIZE));
+		value++;
+	}
 
 	return mmio_read_32(buffer_base);
 }
 
 /**
+ * pm_ipi_buff_read_callb() - Reads IPI request payload sent by the PMU
+ * @value	Used to return value from IPI buffer element (optional)
+ * @count	Number of values to return in @value
+ *
+ * @return	None (the count is capped at IPI_BUFFER_MAX_WORDS)
+ */
+void pm_ipi_buff_read_callb(unsigned int *value, size_t count)
+{
+	size_t i;
+	uintptr_t buffer_base = IPI_BUFFER_PMU_BASE +
+				IPI_BUFFER_TARGET_APU_OFFSET +
+				IPI_BUFFER_REQ_OFFSET;
+
+	if (count > IPI_BUFFER_MAX_WORDS)
+		count = IPI_BUFFER_MAX_WORDS;
+
+	for (i = 0; i <= count; i++) {
+		*value = mmio_read_32(buffer_base + (i * PAYLOAD_ARG_SIZE));
+		value++;
+	}
+}
+
+/**
  * pm_ipi_send_sync() - Sends IPI request to the PMU
  * @proc	Pointer to the processor who is initiating request
  * @payload	API id and call arguments to be written in IPI buffer
- * @value	Used to return value from 2nd IPI buffer element (optional)
+ * @value	Used to return value from IPI buffer element (optional)
+ * @count	Number of values to return in @value
  *
  * Send an IPI request to the power controller and wait for it to be handled.
  *
@@ -227,7 +256,7 @@
  */
 enum pm_ret_status pm_ipi_send_sync(const struct pm_proc *proc,
 				    uint32_t payload[PAYLOAD_ARG_CNT],
-				    unsigned int *value)
+				    unsigned int *value, size_t count)
 {
 	enum pm_ret_status ret;
 
@@ -237,10 +266,25 @@
 	if (ret != PM_RET_SUCCESS)
 		goto unlock;
 
-	ret = pm_ipi_buff_read(proc, value);
+	ret = pm_ipi_buff_read(proc, value, count);
 
 unlock:
 	bakery_lock_release(&pm_secure_lock);
 
 	return ret;
 }
+
+void pm_ipi_irq_enable(void)
+{
+	mmio_write_32(IPI_APU_IER, IPI_APU_IXR_PMU_0_MASK);
+}
+
+void pm_ipi_irq_disable(void)
+{
+	mmio_write_32(IPI_APU_IDR, IPI_APU_IXR_PMU_0_MASK);
+}
+
+void pm_ipi_irq_clear(void)
+{
+	mmio_write_32(IPI_APU_ISR, IPI_APU_IXR_PMU_0_MASK);
+}
diff --git a/plat/xilinx/zynqmp/pm_service/pm_ipi.h b/plat/xilinx/zynqmp/pm_service/pm_ipi.h
index d92e648..b314b80 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_ipi.h
+++ b/plat/xilinx/zynqmp/pm_service/pm_ipi.h
@@ -39,6 +39,10 @@
 			       uint32_t payload[PAYLOAD_ARG_CNT]);
 enum pm_ret_status pm_ipi_send_sync(const struct pm_proc *proc,
 				    uint32_t payload[PAYLOAD_ARG_CNT],
-				    unsigned int *value);
+				    unsigned int *value, size_t count);
+void pm_ipi_buff_read_callb(unsigned int *value, size_t count);
+void pm_ipi_irq_enable(void);
+void pm_ipi_irq_disable(void);
+void pm_ipi_irq_clear(void);
 
 #endif /* _PM_IPI_H_ */
diff --git a/plat/xilinx/zynqmp/pm_service/pm_svc_main.c b/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
index 9c08ffb..036ed8a 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
@@ -42,6 +42,8 @@
 #include "pm_ipi.h"
 #include "../zynqmp_private.h"
 
+#define PM_GET_CALLBACK_DATA	0xa01
+
 /* 0 - UP, !0 - DOWN */
 static int32_t pm_down = !0;
 
@@ -68,7 +70,6 @@
  *
  * Called from sip_svc_setup initialization function with the
  * rt_svc_init signature.
- *
  */
 int pm_setup(void)
 {
@@ -152,7 +153,7 @@
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_SYSTEM_SHUTDOWN:
-		ret = pm_system_shutdown(pm_arg[0]);
+		ret = pm_system_shutdown(pm_arg[0], pm_arg[1]);
 		SMC_RET1(handle, (uint64_t)ret);
 
 	case PM_REQ_NODE:
@@ -174,11 +175,19 @@
 
 	case PM_GET_API_VERSION:
 		/* Check is PM API version already verified */
-		if (pm_ctx.api_version == PM_VERSION)
+		if (pm_ctx.api_version == PM_VERSION) {
 			SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
 				 ((uint64_t)PM_VERSION << 32));
+		}
 
 		ret = pm_get_api_version(&pm_ctx.api_version);
+		/*
+		 * Enable the IPI IRQ: assume the rich OS is now ready to
+		 * handle callback IRQs. Even if this assumption is wrong,
+		 * it would not enable the IRQ in the GIC, so no harm is
+		 * done.
+		 */
+		pm_ipi_irq_enable();
 		SMC_RET1(handle, (uint64_t)ret |
 			 ((uint64_t)pm_ctx.api_version << 32));
 
@@ -242,7 +251,23 @@
 	}
 
 	case PM_GET_CHIPID:
-		SMC_RET1(handle, zynqmp_get_silicon_id());
+	{
+		uint32_t result[2];
+
+		ret = pm_get_chipid(result);
+		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32),
+			 result[1]);
+	}
+
+	case PM_GET_CALLBACK_DATA:
+	{
+		uint32_t result[4];
+
+		pm_get_callbackdata(result, sizeof(result));
+		SMC_RET2(handle,
+			 (uint64_t)result[0] | ((uint64_t)result[1] << 32),
+			 (uint64_t)result[2] | ((uint64_t)result[3] << 32));
+	}
 
 	default:
 		WARN("Unimplemented PM Service Call: 0x%x\n", smc_fid);
diff --git a/plat/xilinx/zynqmp/zynqmp_def.h b/plat/xilinx/zynqmp/zynqmp_def.h
index 65bc25f..bdca3b4 100644
--- a/plat/xilinx/zynqmp/zynqmp_def.h
+++ b/plat/xilinx/zynqmp/zynqmp_def.h
@@ -69,12 +69,9 @@
 /* CRL registers and bitfields */
 #define CRL_APB_BASE			0xFF5E0000
 #define CRL_APB_RPLL_CTRL		(CRL_APB_BASE + 0x30)
-#define CRL_APB_TIMESTAMP_REF_CTRL	(CRL_APB_BASE + 0x128)
 #define CRL_APB_BOOT_MODE_USER		(CRL_APB_BASE + 0x200)
 #define CRL_APB_RESET_CTRL		(CRL_APB_BASE + 0x218)
 
-#define CRL_APB_TIMESTAMP_REF_CTRL_CLKACT_BIT	(1 << 24)
-
 #define CRL_APB_RPLL_CTRL_BYPASS	(1 << 3)
 
 #define CRL_APB_RESET_CTRL_SOFT_RESET	(1 << 4)
@@ -84,11 +81,8 @@
 
 /* system counter registers and bitfields */
 #define IOU_SCNTRS_BASE			0xFF260000
-#define IOU_SCNTRS_CONTROL		(IOU_SCNTRS_BASE + 0)
 #define IOU_SCNTRS_BASEFREQ		(IOU_SCNTRS_BASE + 0x20)
 
-#define IOU_SCNTRS_CONTROL_EN		(1 << 0)
-
 /* APU registers and bitfields */
 #define APU_BASE		0xFD5C0000
 #define APU_CONFIG_0		(APU_BASE + 0x20)
@@ -128,7 +122,6 @@
 #define BASE_GICH_BASE		0xF9040000
 #define BASE_GICV_BASE		0xF9060000
 
-#define IRQ_SEC_IPI_APU			67
 #define ARM_IRQ_SEC_PHY_TIMER		29
 
 #define ARM_IRQ_SEC_SGI_0		8
@@ -146,7 +139,7 @@
  * UART related constants
  ******************************************************************************/
 #define ZYNQMP_UART0_BASE		0xFF000000
-#define ZYNQMP_UART1_BASE		0xFF001000
+#define ZYNQMP_UART1_BASE		0xFF010000
 
 #if ZYNQMP_CONSOLE_IS(cadence)
 # define ZYNQMP_UART_BASE	ZYNQMP_UART0_BASE
diff --git a/plat/xilinx/zynqmp/zynqmp_private.h b/plat/xilinx/zynqmp/zynqmp_private.h
index abcdebc..ddef37b 100644
--- a/plat/xilinx/zynqmp/zynqmp_private.h
+++ b/plat/xilinx/zynqmp/zynqmp_private.h
@@ -39,7 +39,6 @@
 unsigned int zynqmp_get_uart_clk(void);
 int zynqmp_is_pmu_up(void);
 unsigned int zynqmp_get_bootmode(void);
-unsigned int zynqmp_get_silicon_id(void);
 
 /* For FSBL handover */
 void fsbl_atf_handover(entry_point_info_t *bl32_image_ep_info,
diff --git a/services/spd/trusty/sm_err.h b/services/spd/trusty/sm_err.h
new file mode 100644
index 0000000..d7cddec
--- /dev/null
+++ b/services/spd/trusty/sm_err.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __LIB_SM_SM_ERR_H
+#define __LIB_SM_SM_ERR_H
+
+/* Errors from the secure monitor */
+#define SM_ERR_UNDEFINED_SMC		0xFFFFFFFF /* Unknown SMC (defined by ARM DEN 0028A(0.9.0)) */
+#define SM_ERR_INVALID_PARAMETERS	-2
+#define SM_ERR_INTERRUPTED		-3	/* Got interrupted. Call back with restart SMC */
+#define SM_ERR_UNEXPECTED_RESTART	-4	/* Got a restart SMC when we didn't expect it */
+#define SM_ERR_BUSY			-5	/* Temporarily busy. Call back with original args */
+#define SM_ERR_INTERLEAVED_SMC		-6	/* Got a trusted_service SMC when a restart SMC is required */
+#define SM_ERR_INTERNAL_FAILURE		-7	/* Unknown error */
+#define SM_ERR_NOT_SUPPORTED		-8
+#define SM_ERR_NOT_ALLOWED		-9	/* SMC call not allowed */
+#define SM_ERR_END_OF_INPUT		-10
+
+#endif
diff --git a/services/spd/trusty/smcall.h b/services/spd/trusty/smcall.h
new file mode 100644
index 0000000..7e876c8
--- /dev/null
+++ b/services/spd/trusty/smcall.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __LIB_SM_SMCALL_H
+#define __LIB_SM_SMCALL_H
+
+#define SMC_NUM_ENTITIES	64
+#define SMC_NUM_ARGS		4
+#define SMC_NUM_PARAMS		(SMC_NUM_ARGS - 1)
+
+#define SMC_IS_FASTCALL(smc_nr)	((smc_nr) & 0x80000000)
+#define SMC_IS_SMC64(smc_nr)	((smc_nr) & 0x40000000)
+#define SMC_ENTITY(smc_nr)	(((smc_nr) & 0x3F000000) >> 24)
+#define SMC_FUNCTION(smc_nr)	((smc_nr) & 0x0000FFFF)
+
+#define SMC_NR(entity, fn, fastcall, smc64) ((((fastcall) & 0x1) << 31) | \
+					     (((smc64) & 0x1) << 30) | \
+					     (((entity) & 0x3F) << 24) | \
+					     ((fn) & 0xFFFF) \
+					    )
+
+#define SMC_FASTCALL_NR(entity, fn)	SMC_NR((entity), (fn), 1, 0)
+#define SMC_STDCALL_NR(entity, fn)	SMC_NR((entity), (fn), 0, 0)
+#define SMC_FASTCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 1, 1)
+#define SMC_STDCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 0, 1)
+
+#define	SMC_ENTITY_ARCH			0	/* ARM Architecture calls */
+#define	SMC_ENTITY_CPU			1	/* CPU Service calls */
+#define	SMC_ENTITY_SIP			2	/* SIP Service calls */
+#define	SMC_ENTITY_OEM			3	/* OEM Service calls */
+#define	SMC_ENTITY_STD			4	/* Standard Service calls */
+#define	SMC_ENTITY_RESERVED		5	/* Reserved for future use */
+#define	SMC_ENTITY_TRUSTED_APP		48	/* Trusted Application calls */
+#define	SMC_ENTITY_TRUSTED_OS		50	/* Trusted OS calls */
+#define SMC_ENTITY_LOGGING              51	/* Used for secure -> nonsecure logging */
+#define	SMC_ENTITY_SECURE_MONITOR	60	/* Trusted OS calls internal to secure monitor */
+
+/* FC = Fast call, SC = Standard call */
+#define SMC_SC_RESTART_LAST	SMC_STDCALL_NR  (SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_SC_NOP		SMC_STDCALL_NR  (SMC_ENTITY_SECURE_MONITOR, 1)
+
+/*
+ * Return from the secure OS to the non-secure OS with return value in r1
+ */
+#define SMC_SC_NS_RETURN	SMC_STDCALL_NR  (SMC_ENTITY_SECURE_MONITOR, 0)
+
+#define SMC_FC_RESERVED		SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_FC_FIQ_EXIT		SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1)
+#define SMC_FC_REQUEST_FIQ	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2)
+#define SMC_FC_GET_NEXT_IRQ	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3)
+#define SMC_FC_FIQ_ENTER	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 4)
+
+#define SMC_FC64_SET_FIQ_HANDLER SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 5)
+#define SMC_FC64_GET_FIQ_REGS	SMC_FASTCALL64_NR (SMC_ENTITY_SECURE_MONITOR, 6)
+
+#define SMC_FC_CPU_SUSPEND	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 7)
+#define SMC_FC_CPU_RESUME	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 8)
+
+#define SMC_FC_AARCH_SWITCH	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 9)
+#define SMC_FC_GET_VERSION_STR	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 10)
+
+/* Trusted OS entity calls */
+#define SMC_SC_VIRTIO_GET_DESCR	  SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20)
+#define SMC_SC_VIRTIO_START	  SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21)
+#define SMC_SC_VIRTIO_STOP	  SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22)
+
+#define SMC_SC_VDEV_RESET	  SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23)
+#define SMC_SC_VDEV_KICK_VQ	  SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24)
+
+#endif /* __LIB_SM_SMCALL_H */
diff --git a/services/spd/trusty/trusty.c b/services/spd/trusty/trusty.c
new file mode 100644
index 0000000..4962d44
--- /dev/null
+++ b/services/spd/trusty/trusty.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <bl_common.h>
+#include <bl31.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <string.h>
+
+#include "smcall.h"
+#include "sm_err.h"
+
+struct trusty_stack {
+	uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
+};
+
+struct trusty_cpu_ctx {
+	cpu_context_t	cpu_ctx;
+	void		*saved_sp;
+	uint32_t	saved_security_state;
+	int		fiq_handler_active;
+	uint64_t	fiq_handler_pc;
+	uint64_t	fiq_handler_cpsr;
+	uint64_t	fiq_handler_sp;
+	uint64_t	fiq_pc;
+	uint64_t	fiq_cpsr;
+	uint64_t	fiq_sp_el1;
+	gp_regs_t	fiq_gpregs;
+	struct trusty_stack	secure_stack;
+};
+
+struct args {
+	uint64_t	r0;
+	uint64_t	r1;
+	uint64_t	r2;
+	uint64_t	r3;
+};
+
+struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];
+
+struct args trusty_init_context_stack(void **sp, void *new_stack);
+struct args trusty_context_switch_helper(void **sp, uint64_t r0, uint64_t r1,
+					 uint64_t r2, uint64_t r3);
+
+static struct trusty_cpu_ctx *get_trusty_ctx(void)
+{
+	return &trusty_cpu_ctx[plat_my_core_pos()];
+}
+
+static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
+					 uint64_t r1, uint64_t r2, uint64_t r3)
+{
+	struct args ret;
+	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+	assert(ctx->saved_security_state != security_state);
+
+	cm_el1_sysregs_context_save(security_state);
+
+	ctx->saved_security_state = security_state;
+	ret = trusty_context_switch_helper(&ctx->saved_sp, r0, r1, r2, r3);
+
+	assert(ctx->saved_security_state == !security_state);
+
+	cm_el1_sysregs_context_restore(security_state);
+	cm_set_next_eret_context(security_state);
+
+	return ret;
+}
+
+static uint64_t trusty_fiq_handler(uint32_t id,
+				   uint32_t flags,
+				   void *handle,
+				   void *cookie)
+{
+	struct args ret;
+	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+	assert(!is_caller_secure(flags));
+
+	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_ENTER, 0, 0, 0);
+	if (ret.r0) {
+		SMC_RET0(handle);
+	}
+
+	if (ctx->fiq_handler_active) {
+		INFO("%s: fiq handler already active\n", __func__);
+		SMC_RET0(handle);
+	}
+
+	ctx->fiq_handler_active = 1;
+	memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
+	ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
+	ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
+	ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);
+
+	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
+	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, ctx->fiq_handler_cpsr);
+
+	SMC_RET0(handle);
+}
+
+static uint64_t trusty_set_fiq_handler(void *handle, uint64_t cpu,
+			uint64_t handler, uint64_t stack)
+{
+	struct trusty_cpu_ctx *ctx;
+
+	if (cpu >= PLATFORM_CORE_COUNT) {
+		ERROR("%s: cpu %ld >= %d\n", __func__, cpu, PLATFORM_CORE_COUNT);
+		return SM_ERR_INVALID_PARAMETERS;
+	}
+
+	ctx = &trusty_cpu_ctx[cpu];
+	ctx->fiq_handler_pc = handler;
+	ctx->fiq_handler_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
+	ctx->fiq_handler_sp = stack;
+
+	SMC_RET1(handle, 0);
+}
+
+static uint64_t trusty_get_fiq_regs(void *handle)
+{
+	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+	uint64_t sp_el0 = read_ctx_reg(&ctx->fiq_gpregs, CTX_GPREG_SP_EL0);
+
+	SMC_RET4(handle, ctx->fiq_pc, ctx->fiq_cpsr, sp_el0, ctx->fiq_sp_el1);
+}
+
+static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t x3)
+{
+	struct args ret;
+	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+	if (!ctx->fiq_handler_active) {
+		NOTICE("%s: fiq handler not active\n", __func__);
+		SMC_RET1(handle, SM_ERR_INVALID_PARAMETERS);
+	}
+
+	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_EXIT, 0, 0, 0);
+	if (ret.r0 != 1) {
+		INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %ld\n",
+		       __func__, handle, ret.r0);
+	}
+
+	/*
+	 * Restore register state to state recorded on fiq entry.
+	 *
+	 * x0, sp_el1, pc and cpsr need to be restored because el1 cannot
+	 * restore them.
+	 *
+	 * x1-x4 and x8-x17 need to be restored here because smc_handler64
+	 * corrupts them (el1 code also restored them).
+	 */
+	memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
+	ctx->fiq_handler_active = 0;
+	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
+	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, ctx->fiq_cpsr);
+
+	SMC_RET0(handle);
+}
+
+static uint64_t trusty_smc_handler(uint32_t smc_fid,
+			 uint64_t x1,
+			 uint64_t x2,
+			 uint64_t x3,
+			 uint64_t x4,
+			 void *cookie,
+			 void *handle,
+			 uint64_t flags)
+{
+	struct args ret;
+
+	if (is_caller_secure(flags)) {
+		if (smc_fid == SMC_SC_NS_RETURN) {
+			ret = trusty_context_switch(SECURE, x1, 0, 0, 0);
+			SMC_RET4(handle, ret.r0, ret.r1, ret.r2, ret.r3);
+		}
+		INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) \
+		     cpu %d, unknown smc\n",
+		     __func__, smc_fid, x1, x2, x3, x4, cookie, handle, flags,
+		     plat_my_core_pos());
+		SMC_RET1(handle, SMC_UNK);
+	} else {
+		switch (smc_fid) {
+		case SMC_FC64_SET_FIQ_HANDLER:
+			return trusty_set_fiq_handler(handle, x1, x2, x3);
+		case SMC_FC64_GET_FIQ_REGS:
+			return trusty_get_fiq_regs(handle);
+		case SMC_FC_FIQ_EXIT:
+			return trusty_fiq_exit(handle, x1, x2, x3);
+		default:
+			ret = trusty_context_switch(NON_SECURE, smc_fid, x1,
+				x2, x3);
+			SMC_RET1(handle, ret.r0);
+		}
+	}
+}
+
+static int32_t trusty_init(void)
+{
+	void el3_exit();
+	entry_point_info_t *ep_info;
+	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+	uint32_t cpu = plat_my_core_pos();
+	int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
+			       CTX_SPSR_EL3));
+
+	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+
+	cm_el1_sysregs_context_save(NON_SECURE);
+
+	cm_set_context(&ctx->cpu_ctx, SECURE);
+	cm_init_my_context(ep_info);
+
+	/*
+	 * Adjust secondary cpu entry point for 32 bit images to the
+	 * end of exception vectors
+	 */
+	if ((cpu != 0) && (reg_width == MODE_RW_32)) {
+		INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
+		     cpu, ep_info->pc + (1U << 5));
+		cm_set_elr_el3(SECURE, ep_info->pc + (1U << 5));
+	}
+
+	cm_el1_sysregs_context_restore(SECURE);
+	cm_set_next_eret_context(SECURE);
+
+	ctx->saved_security_state = ~0; /* initial saved state is invalid */
+	trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack);
+
+	trusty_context_switch_helper(&ctx->saved_sp, 0, 0, 0, 0);
+
+	cm_el1_sysregs_context_restore(NON_SECURE);
+	cm_set_next_eret_context(NON_SECURE);
+
+	return 0;
+}
+
+static void trusty_cpu_suspend(void)
+{
+	struct args ret;
+	unsigned int linear_id = plat_my_core_pos();
+
+	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_SUSPEND, 0, 0, 0);
+	if (ret.r0 != 0) {
+		INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %ld\n",
+		     __func__, linear_id, ret.r0);
+	}
+}
+
+static void trusty_cpu_resume(void)
+{
+	struct args ret;
+	unsigned int linear_id = plat_my_core_pos();
+
+	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_RESUME, 0, 0, 0);
+	if (ret.r0 != 0) {
+		INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %ld\n",
+		     __func__, linear_id, ret.r0);
+	}
+}
+
+static int32_t trusty_cpu_off_handler(uint64_t unused)
+{
+	trusty_cpu_suspend();
+
+	return 0;
+}
+
+static void trusty_cpu_on_finish_handler(uint64_t unused)
+{
+	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+	if (!ctx->saved_sp) {
+		trusty_init();
+	} else {
+		trusty_cpu_resume();
+	}
+}
+
+static void trusty_cpu_suspend_handler(uint64_t unused)
+{
+	trusty_cpu_suspend();
+}
+
+static void trusty_cpu_suspend_finish_handler(uint64_t unused)
+{
+	trusty_cpu_resume();
+}
+
+static const spd_pm_ops_t trusty_pm = {
+	.svc_off = trusty_cpu_off_handler,
+	.svc_suspend = trusty_cpu_suspend_handler,
+	.svc_on_finish = trusty_cpu_on_finish_handler,
+	.svc_suspend_finish = trusty_cpu_suspend_finish_handler,
+};
+
+static int32_t trusty_setup(void)
+{
+	entry_point_info_t *ep_info;
+	uint32_t instr;
+	uint32_t flags;
+	int ret;
+	int aarch32 = 0;
+
+	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+	if (!ep_info) {
+		INFO("Trusty image missing.\n");
+		return -1;
+	}
+
+	instr = *(uint32_t *)ep_info->pc;
+
+	if (instr >> 24 == 0xea) {
+		INFO("trusty: Found 32 bit image\n");
+		aarch32 = 1;
+	} else if (instr >> 8 == 0xd53810) {
+		INFO("trusty: Found 64 bit image\n");
+	} else {
+		INFO("trusty: Found unknown image, 0x%x\n", instr);
+	}
+
+	SET_PARAM_HEAD(ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
+	if (!aarch32)
+		ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+					DISABLE_ALL_EXCEPTIONS);
+	else
+		ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+					    SPSR_E_LITTLE,
+					    DAIF_FIQ_BIT |
+					    DAIF_IRQ_BIT |
+					    DAIF_ABT_BIT);
+
+	bl31_register_bl32_init(trusty_init);
+
+	psci_register_spd_pm_hook(&trusty_pm);
+
+	flags = 0;
+	set_interrupt_rm_flag(flags, NON_SECURE);
+	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+					      trusty_fiq_handler,
+					      flags);
+	if (ret)
+		ERROR("trusty: failed to register fiq handler, ret = %d\n", ret);
+
+	return 0;
+}
+
+/* Define a SPD runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+	trusty_fast,
+
+	OEN_TOS_START,
+	SMC_ENTITY_SECURE_MONITOR,
+	SMC_TYPE_FAST,
+	trusty_setup,
+	trusty_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for standard SMC calls */
+DECLARE_RT_SVC(
+	trusty_std,
+
+	OEN_TOS_START,
+	SMC_ENTITY_SECURE_MONITOR,
+	SMC_TYPE_STD,
+	NULL,
+	trusty_smc_handler
+);
diff --git a/services/spd/trusty/trusty.mk b/services/spd/trusty/trusty.mk
new file mode 100644
index 0000000..9f53515
--- /dev/null
+++ b/services/spd/trusty/trusty.mk
@@ -0,0 +1,34 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPD_INCLUDES		:=
+
+SPD_SOURCES		:=	services/spd/trusty/trusty.c		\
+				services/spd/trusty/trusty_helpers.S
diff --git a/services/spd/trusty/trusty_helpers.S b/services/spd/trusty/trusty_helpers.S
new file mode 100644
index 0000000..9bbb044
--- /dev/null
+++ b/services/spd/trusty/trusty_helpers.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+
+.macro push ra, rb, sp=sp
+	stp \ra, \rb, [\sp,#-16]!
+.endm
+
+.macro pop ra, rb, sp=sp
+	ldp \ra, \rb, [\sp], #16
+.endm
+
+	.global trusty_context_switch_helper
+func trusty_context_switch_helper
+	push	x8, xzr
+	push	x19, x20
+	push	x21, x22
+	push	x23, x24
+	push	x25, x26
+	push	x27, x28
+	push	x29, x30
+
+	mov	x9, sp
+	ldr	x10, [x0]
+	mov	sp, x10
+	str	x9, [x0]
+
+	pop	x29, x30
+	pop	x27, x28
+	pop	x25, x26
+	pop	x23, x24
+	pop	x21, x22
+	pop	x19, x20
+	pop	x8, xzr
+	stp	x1, x2, [x8]
+	stp	x3, x4, [x8, #16]
+
+	ret
+endfunc trusty_context_switch_helper
+
+	.global trusty_init_context_stack
+func trusty_init_context_stack
+	push	x8, xzr, x1
+	push	xzr, xzr, x1
+	push	xzr, xzr, x1
+	push	xzr, xzr, x1
+	push	xzr, xzr, x1
+	push	xzr, xzr, x1
+	adr	x9, el3_exit
+	push	xzr, x9, x1
+	str	x1, [x0]
+	ret
+endfunc trusty_init_context_stack