Merge pull request #1760 from igoropaniuk/rpi3_preloaded_dtb_fix

rpi3: fix RPI3_PRELOADED_DTB_BASE usage
diff --git a/bl31/bl31.ld.S b/bl31/bl31.ld.S
index 777115a..1af1962 100644
--- a/bl31/bl31.ld.S
+++ b/bl31/bl31.ld.S
@@ -143,6 +143,10 @@
            "cpu_ops not defined for this platform.")
 
 #if ENABLE_SPM
+#ifndef SPM_SHIM_EXCEPTIONS_VMA
+#define SPM_SHIM_EXCEPTIONS_VMA         RAM
+#endif
+
     /*
      * Exception vectors of the SPM shim layer. They must be aligned to a 2K
      * address, but we need to place them in a separate page so that we can set
@@ -156,7 +160,10 @@
         *(.spm_shim_exceptions)
         . = ALIGN(PAGE_SIZE);
         __SPM_SHIM_EXCEPTIONS_END__ = .;
-    } >RAM
+    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM
+
+    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
+    . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
 #endif
 
     /*
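The ">SPM_SHIM_EXCEPTIONS_VMA AT>RAM" change above separates the address the shim vectors are linked at (VMA) from the address at which they are stored inside the BL31 image (LMA), which is what allows a platform to relocate the vectors into a dedicated region. A minimal C sketch of how a platform could use the three exported linker symbols to do that relocation at runtime (illustrative only; the copy routine and its call site are assumptions, not part of this patch):

#include <string.h>

extern char __SPM_SHIM_EXCEPTIONS_START__[];
extern char __SPM_SHIM_EXCEPTIONS_END__[];
extern char __SPM_SHIM_EXCEPTIONS_LMA__[];

static void copy_spm_shim_exceptions(void)
{
	size_t size = (size_t)(__SPM_SHIM_EXCEPTIONS_END__ -
			       __SPM_SHIM_EXCEPTIONS_START__);

	/* Copy the vectors from their load address to their linked address. */
	memcpy(__SPM_SHIM_EXCEPTIONS_START__, __SPM_SHIM_EXCEPTIONS_LMA__, size);
}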
diff --git a/docs/plat/nvidia-tegra.rst b/docs/plat/nvidia-tegra.rst
index 56dfacf..e244c1c 100644
--- a/docs/plat/nvidia-tegra.rst
+++ b/docs/plat/nvidia-tegra.rst
@@ -80,6 +80,8 @@
 uint64\_t tzdram\_base;
 /* UART port ID \*/
 int uart\_id;
+/* L2 ECC parity protection disable flag \*/
+int l2\_ecc\_parity\_prot\_dis;
 } plat\_params\_from\_bl2\_t;
 
 Power Management
diff --git a/include/plat/arm/common/arm_spm_def.h b/include/plat/arm/common/arm_spm_def.h
index 1c6f6e7..997e156 100644
--- a/include/plat/arm/common/arm_spm_def.h
+++ b/include/plat/arm/common/arm_spm_def.h
@@ -86,12 +86,12 @@
  * requests. Mapped as RW and NS. Placed after the shared memory between EL3 and
  * S-EL0.
  */
-#define ARM_SP_IMAGE_NS_BUF_BASE	(PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE)
-#define ARM_SP_IMAGE_NS_BUF_SIZE	ULL(0x10000)
+#define PLAT_SP_IMAGE_NS_BUF_BASE	(PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE)
+#define PLAT_SP_IMAGE_NS_BUF_SIZE	ULL(0x10000)
 #define ARM_SP_IMAGE_NS_BUF_MMAP	MAP_REGION2(				\
-						ARM_SP_IMAGE_NS_BUF_BASE,	\
-						ARM_SP_IMAGE_NS_BUF_BASE,	\
-						ARM_SP_IMAGE_NS_BUF_SIZE,	\
+						PLAT_SP_IMAGE_NS_BUF_BASE,	\
+						PLAT_SP_IMAGE_NS_BUF_BASE,	\
+						PLAT_SP_IMAGE_NS_BUF_SIZE,	\
 						MT_RW_DATA | MT_NS | MT_USER,	\
 						PAGE_SIZE)
 
diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h
index 6281608..d1c03be 100644
--- a/include/plat/arm/common/plat_arm.h
+++ b/include/plat/arm/common/plat_arm.h
@@ -46,8 +46,8 @@
 		PLAT_ARM_TZC_NS_DEV_ACCESS}, 				\
 	{ARM_DRAM2_BASE, ARM_DRAM2_END, ARM_TZC_NS_DRAM_S_ACCESS,	\
 		PLAT_ARM_TZC_NS_DEV_ACCESS},				\
-	{ARM_SP_IMAGE_NS_BUF_BASE, (ARM_SP_IMAGE_NS_BUF_BASE +		\
-		ARM_SP_IMAGE_NS_BUF_SIZE) - 1, TZC_REGION_S_NONE,	\
+	{PLAT_SP_IMAGE_NS_BUF_BASE, (PLAT_SP_IMAGE_NS_BUF_BASE +	\
+		PLAT_SP_IMAGE_NS_BUF_SIZE) - 1, TZC_REGION_S_NONE,	\
 		PLAT_ARM_TZC_NS_DEV_ACCESS}
 
 #else
diff --git a/plat/arm/board/fvp/fvp_common.c b/plat/arm/board/fvp/fvp_common.c
index 1a27fc4..4da807a 100644
--- a/plat/arm/board/fvp/fvp_common.c
+++ b/plat/arm/board/fvp/fvp_common.c
@@ -215,12 +215,12 @@
 	.sp_image_base       = ARM_SP_IMAGE_BASE,
 	.sp_stack_base       = PLAT_SP_IMAGE_STACK_BASE,
 	.sp_heap_base        = ARM_SP_IMAGE_HEAP_BASE,
-	.sp_ns_comm_buf_base = ARM_SP_IMAGE_NS_BUF_BASE,
+	.sp_ns_comm_buf_base = PLAT_SP_IMAGE_NS_BUF_BASE,
 	.sp_shared_buf_base  = PLAT_SPM_BUF_BASE,
 	.sp_image_size       = ARM_SP_IMAGE_SIZE,
 	.sp_pcpu_stack_size  = PLAT_SP_IMAGE_STACK_PCPU_SIZE,
 	.sp_heap_size        = ARM_SP_IMAGE_HEAP_SIZE,
-	.sp_ns_comm_buf_size = ARM_SP_IMAGE_NS_BUF_SIZE,
+	.sp_ns_comm_buf_size = PLAT_SP_IMAGE_NS_BUF_SIZE,
 	.sp_shared_buf_size  = PLAT_SPM_BUF_SIZE,
 	.num_sp_mem_regions  = ARM_SP_IMAGE_NUM_MEM_REGIONS,
 	.num_cpus            = PLATFORM_CORE_COUNT,
diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h
index ca4bd53..31f06a9 100644
--- a/plat/arm/board/fvp/include/platform_def.h
+++ b/plat/arm/board/fvp/include/platform_def.h
@@ -271,8 +271,8 @@
 #define PLAT_ARM_PRIVATE_SDEI_EVENTS	ARM_SDEI_PRIVATE_EVENTS
 #define PLAT_ARM_SHARED_SDEI_EVENTS	ARM_SDEI_SHARED_EVENTS
 
-#define PLAT_ARM_SP_IMAGE_STACK_BASE	(ARM_SP_IMAGE_NS_BUF_BASE +	\
-					 ARM_SP_IMAGE_NS_BUF_SIZE)
+#define PLAT_ARM_SP_IMAGE_STACK_BASE	(PLAT_SP_IMAGE_NS_BUF_BASE +	\
+					 PLAT_SP_IMAGE_NS_BUF_SIZE)
 
 #define PLAT_SP_PRI			PLAT_RAS_PRI
 
diff --git a/plat/arm/css/sgi/include/sgi_base_platform_def.h b/plat/arm/css/sgi/include/sgi_base_platform_def.h
index ad7ab81..4afaae1 100644
--- a/plat/arm/css/sgi/include/sgi_base_platform_def.h
+++ b/plat/arm/css/sgi/include/sgi_base_platform_def.h
@@ -147,8 +147,8 @@
 /* Allocate 128KB for CPER buffers */
 #define PLAT_SP_BUF_BASE			ULL(0x20000)
 
-#define PLAT_ARM_SP_IMAGE_STACK_BASE		(ARM_SP_IMAGE_NS_BUF_BASE + \
-						ARM_SP_IMAGE_NS_BUF_SIZE + \
+#define PLAT_ARM_SP_IMAGE_STACK_BASE		(PLAT_SP_IMAGE_NS_BUF_BASE + \
+						PLAT_SP_IMAGE_NS_BUF_SIZE + \
 						PLAT_SP_BUF_BASE)
 
 /* Platform specific SMC FID's used for RAS */
@@ -171,8 +171,8 @@
 	SDEI_EXPLICIT_EVENT(SGI_SDEI_DS_EVENT_1, SDEI_MAPF_CRITICAL),
 #define PLAT_ARM_SHARED_SDEI_EVENTS
 
-#define ARM_SP_CPER_BUF_BASE			(ARM_SP_IMAGE_NS_BUF_BASE + \
-						ARM_SP_IMAGE_NS_BUF_SIZE)
+#define ARM_SP_CPER_BUF_BASE			(PLAT_SP_IMAGE_NS_BUF_BASE + \
+						PLAT_SP_IMAGE_NS_BUF_SIZE)
 #define ARM_SP_CPER_BUF_SIZE			ULL(0x20000)
 #define ARM_SP_CPER_BUF_MMAP			MAP_REGION2(		\
 						ARM_SP_CPER_BUF_BASE,	\
@@ -182,8 +182,8 @@
 						PAGE_SIZE)
 
 #else
-#define PLAT_ARM_SP_IMAGE_STACK_BASE	(ARM_SP_IMAGE_NS_BUF_BASE +	\
-					 ARM_SP_IMAGE_NS_BUF_SIZE)
+#define PLAT_ARM_SP_IMAGE_STACK_BASE	(PLAT_SP_IMAGE_NS_BUF_BASE +	\
+					 PLAT_SP_IMAGE_NS_BUF_SIZE)
 #endif /* RAS_EXTENSION */
 
 /* Platform ID address */
diff --git a/plat/arm/css/sgi/sgi_plat.c b/plat/arm/css/sgi/sgi_plat.c
index ca90aad..72cda7f 100644
--- a/plat/arm/css/sgi/sgi_plat.c
+++ b/plat/arm/css/sgi/sgi_plat.c
@@ -124,12 +124,12 @@
 	.sp_image_base       = ARM_SP_IMAGE_BASE,
 	.sp_stack_base       = PLAT_SP_IMAGE_STACK_BASE,
 	.sp_heap_base        = ARM_SP_IMAGE_HEAP_BASE,
-	.sp_ns_comm_buf_base = ARM_SP_IMAGE_NS_BUF_BASE,
+	.sp_ns_comm_buf_base = PLAT_SP_IMAGE_NS_BUF_BASE,
 	.sp_shared_buf_base  = PLAT_SPM_BUF_BASE,
 	.sp_image_size       = ARM_SP_IMAGE_SIZE,
 	.sp_pcpu_stack_size  = PLAT_SP_IMAGE_STACK_PCPU_SIZE,
 	.sp_heap_size        = ARM_SP_IMAGE_HEAP_SIZE,
-	.sp_ns_comm_buf_size = ARM_SP_IMAGE_NS_BUF_SIZE,
+	.sp_ns_comm_buf_size = PLAT_SP_IMAGE_NS_BUF_SIZE,
 	.sp_shared_buf_size  = PLAT_SPM_BUF_SIZE,
 	.num_sp_mem_regions  = ARM_SP_IMAGE_NUM_MEM_REGIONS,
 	.num_cpus            = PLATFORM_CORE_COUNT,
diff --git a/plat/imx/common/imx_sip_handler.c b/plat/imx/common/imx_sip_handler.c
new file mode 100644
index 0000000..22a7f2b
--- /dev/null
+++ b/plat/imx/common/imx_sip_handler.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2019 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <std_svc.h>
+#include <platform_def.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <imx_sip_svc.h>
+#include <sci/sci.h>
+
+#ifdef PLAT_IMX8QM
+const static int ap_cluster_index[PLATFORM_CLUSTER_COUNT] = {
+	SC_R_A53, SC_R_A72,
+};
+#endif
+
+static int imx_srtc_set_time(uint32_t year_mon,
+			unsigned long day_hour,
+			unsigned long min_sec)
+{
+	return sc_timer_set_rtc_time(ipc_handle,
+		year_mon >> 16, year_mon & 0xffff,
+		day_hour >> 16, day_hour & 0xffff,
+		min_sec >> 16, min_sec & 0xffff);
+}
+
+int imx_srtc_handler(uint32_t smc_fid,
+		    void *handle,
+		    u_register_t x1,
+		    u_register_t x2,
+		    u_register_t x3,
+		    u_register_t x4)
+{
+	int ret;
+
+	switch (x1) {
+	case IMX_SIP_SRTC_SET_TIME:
+		ret = imx_srtc_set_time(x2, x3, x4);
+		break;
+	default:
+		ret = SMC_UNK;
+	}
+
+	SMC_RET1(handle, ret);
+}
+
+static void imx_cpufreq_set_target(uint32_t cluster_id, unsigned long freq)
+{
+	sc_pm_clock_rate_t rate = (sc_pm_clock_rate_t)freq;
+
+#ifdef PLAT_IMX8QM
+	sc_pm_set_clock_rate(ipc_handle, ap_cluster_index[cluster_id], SC_PM_CLK_CPU, &rate);
+#endif
+#ifdef PLAT_IMX8QX
+	sc_pm_set_clock_rate(ipc_handle, SC_R_A35, SC_PM_CLK_CPU, &rate);
+#endif
+}
+
+int imx_cpufreq_handler(uint32_t smc_fid,
+		    u_register_t x1,
+		    u_register_t x2,
+		    u_register_t x3)
+{
+	switch (x1) {
+	case IMX_SIP_SET_CPUFREQ:
+		imx_cpufreq_set_target(x2, x3);
+		break;
+	default:
+		return SMC_UNK;
+	}
+
+	return 0;
+}
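imx_srtc_set_time() above unpacks each SMC argument as two 16-bit halves (year/month, day/hour, minute/second). An illustrative caller-side sketch of the matching packing; example_smc is a placeholder for whatever SMC helper the non-secure caller uses and is not defined anywhere in this patch:

#include <stdint.h>
#include <imx_sip_svc.h>	/* IMX_SIP_SRTC, IMX_SIP_SRTC_SET_TIME */

/* Placeholder prototype for the caller's SMC helper. */
extern void example_smc(uint32_t fid, uint32_t x1, uint32_t x2,
			uint32_t x3, uint32_t x4);

static void example_set_rtc(uint16_t year, uint16_t mon, uint16_t day,
			    uint16_t hour, uint16_t min, uint16_t sec)
{
	/* Pack two 16-bit fields per register, high half first. */
	uint32_t year_mon = ((uint32_t)year << 16) | mon;
	uint32_t day_hour = ((uint32_t)day << 16) | hour;
	uint32_t min_sec  = ((uint32_t)min << 16) | sec;

	example_smc(IMX_SIP_SRTC, IMX_SIP_SRTC_SET_TIME,
		    year_mon, day_hour, min_sec);
}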
diff --git a/plat/imx/common/imx_sip_svc.c b/plat/imx/common/imx_sip_svc.c
new file mode 100644
index 0000000..89b9df8
--- /dev/null
+++ b/plat/imx/common/imx_sip_svc.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/pmf/pmf.h>
+#include <tools_share/uuid.h>
+#include <imx_sip_svc.h>
+
+static int32_t imx_sip_setup(void)
+{
+	return 0;
+}
+
+static uintptr_t imx_sip_handler(unsigned int smc_fid,
+			u_register_t x1,
+			u_register_t x2,
+			u_register_t x3,
+			u_register_t x4,
+			void *cookie,
+			void *handle,
+			u_register_t flags)
+{
+	switch (smc_fid) {
+#if (defined(PLAT_IMX8QM) || defined(PLAT_IMX8QX))
+	case  IMX_SIP_SRTC:
+		return imx_srtc_handler(smc_fid, handle, x1, x2, x3, x4);
+	case  IMX_SIP_CPUFREQ:
+		SMC_RET1(handle, imx_cpufreq_handler(smc_fid, x1, x2, x3));
+		break;
+#endif
+	default:
+		WARN("Unimplemented i.MX SiP Service Call: 0x%x\n", smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+		break;
+	}
+}
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+		imx_sip_svc,
+		OEN_SIP_START,
+		OEN_SIP_END,
+		SMC_TYPE_FAST,
+		imx_sip_setup,
+		imx_sip_handler
+);
diff --git a/plat/imx/common/include/imx_sip_svc.h b/plat/imx/common/include/imx_sip_svc.h
new file mode 100644
index 0000000..4ba7b87
--- /dev/null
+++ b/plat/imx/common/include/imx_sip_svc.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IMX_SIP_SVC_H__
+#define __IMX_SIP_SVC_H__
+
+/* SMC function IDs for SiP Service queries */
+#define IMX_SIP_CPUFREQ			0xC2000001
+#define IMX_SIP_SET_CPUFREQ		0x00
+
+#define IMX_SIP_SRTC			0xC2000002
+#define IMX_SIP_SRTC_SET_TIME		0x00
+
+#if (defined(PLAT_IMX8QM) || defined(PLAT_IMX8QX))
+int imx_cpufreq_handler(uint32_t smc_fid, u_register_t x1,
+			u_register_t x2, u_register_t x3);
+int imx_srtc_handler(uint32_t smc_fid, void *handle, u_register_t x1,
+		     u_register_t x2, u_register_t x3, u_register_t x4);
+#endif
+
+#endif /* __IMX_SIP_SVC_H__ */
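The two SiP function IDs above follow the Arm SMC Calling Convention layout for fast SMC64 calls owned by the SiP service. A small sketch of the field decomposition (macro names are illustrative, not TF-A definitions):

#include <stdint.h>

#define FID_IS_FAST(fid)	(((fid) >> 31) & 0x1U)	/* 1: fast call */
#define FID_IS_SMC64(fid)	(((fid) >> 30) & 0x1U)	/* 1: SMC64 range */
#define FID_OEN(fid)		(((fid) >> 24) & 0x3fU)	/* 0x02: SiP service */
#define FID_NUM(fid)		((fid) & 0xffffU)	/* function number */

/* IMX_SIP_CPUFREQ (0xC2000001): fast, SMC64, OEN 0x02, function 1. */
/* IMX_SIP_SRTC    (0xC2000002): fast, SMC64, OEN 0x02, function 2. */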
diff --git a/plat/imx/common/include/sci/sci.h b/plat/imx/common/include/sci/sci.h
index f0a2502..1b2f699 100644
--- a/plat/imx/common/include/sci/sci.h
+++ b/plat/imx/common/include/sci/sci.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,5 +15,6 @@
 #include <sci/svc/pad/sci_pad_api.h>
 #include <sci/svc/pm/sci_pm_api.h>
 #include <sci/svc/rm/sci_rm_api.h>
+#include <sci/svc/timer/sci_timer_api.h>
 
 #endif /* SCI_H */
diff --git a/plat/imx/common/include/sci/svc/timer/sci_timer_api.h b/plat/imx/common/include/sci/svc/timer/sci_timer_api.h
new file mode 100644
index 0000000..f8423ab
--- /dev/null
+++ b/plat/imx/common/include/sci/svc/timer/sci_timer_api.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*!
+ * Header file containing the public API for the System Controller (SC)
+ * Timer function.
+ *
+ * @addtogroup TIMER_SVC (SVC) Timer Service
+ *
+ * Module for the Timer service. This includes support for the watchdog, RTC,
+ * and system counter. Note every resource partition has a watchdog it can
+ * use.
+ *
+ * @{
+ */
+
+#ifndef SC_TIMER_API_H
+#define SC_TIMER_API_H
+
+/* Includes */
+
+#include <sci/sci_types.h>
+
+/* Defines */
+
+/*!
+ * @name Defines for type widths
+ */
+/*@{*/
+#define SC_TIMER_ACTION_W   3U	/* Width of sc_timer_wdog_action_t */
+/*@}*/
+
+/*!
+ * @name Defines for sc_timer_wdog_action_t
+ */
+/*@{*/
+#define SC_TIMER_WDOG_ACTION_PARTITION      0U	/* Reset partition */
+#define SC_TIMER_WDOG_ACTION_WARM           1U	/* Warm reset system */
+#define SC_TIMER_WDOG_ACTION_COLD           2U	/* Cold reset system */
+#define SC_TIMER_WDOG_ACTION_BOARD          3U	/* Reset board */
+#define SC_TIMER_WDOG_ACTION_IRQ            4U	/* Only generate IRQs */
+/*@}*/
+
+/* Types */
+
+/*!
+ * This type is used to configure the watchdog action.
+ */
+typedef uint8_t sc_timer_wdog_action_t;
+
+/*!
+ * This type is used to declare a watchdog time value in milliseconds.
+ */
+typedef uint32_t sc_timer_wdog_time_t;
+
+/* Functions */
+
+/*!
+ * @name Watchdog Functions
+ * @{
+ */
+
+/*!
+ * This function sets the watchdog timeout in milliseconds. If not
+ * set then the timeout defaults to the max. Once locked this value
+ * cannot be changed.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     timeout     timeout period for the watchdog
+ *
+ * @return Returns an error code (SC_ERR_NONE = success, SC_ERR_LOCKED
+ *         = locked).
+ */
+sc_err_t sc_timer_set_wdog_timeout(sc_ipc_t ipc, sc_timer_wdog_time_t timeout);
+
+/*!
+ * This function sets the watchdog pre-timeout in milliseconds. If not
+ * set then the pre-timeout defaults to the max. Once locked this value
+ * cannot be changed.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     pre_timeout pre-timeout period for the watchdog
+ *
+ * When the pre-timeout expires an IRQ will be generated. Note this timeout
+ * clears when the IRQ is triggered. An IRQ is generated for the failing
+ * partition and all of its child partitions.
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ */
+sc_err_t sc_timer_set_wdog_pre_timeout(sc_ipc_t ipc,
+				       sc_timer_wdog_time_t pre_timeout);
+
+/*!
+ * This function starts the watchdog.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     lock        boolean indicating the lock status
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ *
+ * If \a lock is set then the watchdog cannot be stopped or the timeout
+ * period changed.
+ */
+sc_err_t sc_timer_start_wdog(sc_ipc_t ipc, sc_bool_t lock);
+
+/*!
+ * This function stops the watchdog if it is not locked.
+ *
+ * @param[in]     ipc         IPC handle
+ *
+ * @return Returns an error code (SC_ERR_NONE = success, SC_ERR_LOCKED
+ *         = locked).
+ */
+sc_err_t sc_timer_stop_wdog(sc_ipc_t ipc);
+
+/*!
+ * This function pings (services, kicks) the watchdog resetting the time
+ * before expiration back to the timeout.
+ *
+ * @param[in]     ipc         IPC handle
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ */
+sc_err_t sc_timer_ping_wdog(sc_ipc_t ipc);
+
+/*!
+ * This function gets the status of the watchdog. All arguments are
+ * in milliseconds.
+ *
+ * @param[in]     ipc             IPC handle
+ * @param[out]    timeout         pointer to return the timeout
+ * @param[out]    max_timeout     pointer to return the max timeout
+ * @param[out]    remaining_time  pointer to return the time remaining
+ *                                until trigger
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ */
+sc_err_t sc_timer_get_wdog_status(sc_ipc_t ipc,
+				  sc_timer_wdog_time_t *timeout,
+				  sc_timer_wdog_time_t *max_timeout,
+				  sc_timer_wdog_time_t *remaining_time);
+
+/*!
+ * This function gets the status of the watchdog of a partition. All
+ * arguments are in milliseconds.
+ *
+ * @param[in]     ipc             IPC handle
+ * @param[in]     pt              partition to query
+ * @param[out]    enb             pointer to return enable status
+ * @param[out]    timeout         pointer to return the timeout
+ * @param[out]    remaining_time  pointer to return the time remaining
+ *                                until trigger
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ */
+sc_err_t sc_timer_pt_get_wdog_status(sc_ipc_t ipc, sc_rm_pt_t pt,
+				     sc_bool_t *enb,
+				     sc_timer_wdog_time_t *timeout,
+				     sc_timer_wdog_time_t *remaining_time);
+
+/*!
+ * This function configures the action to be taken when a watchdog
+ * expires.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     pt          partition to affect
+ * @param[in]     action      action to take
+ *
+ * Default action is inherited from the parent.
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ *
+ * Return errors:
+ * - SC_ERR_PARM if invalid parameters,
+ * - SC_ERR_NOACCESS if caller's partition is not the SYSTEM owner,
+ * - SC_ERR_LOCKED if the watchdog is locked
+ */
+sc_err_t sc_timer_set_wdog_action(sc_ipc_t ipc,
+				  sc_rm_pt_t pt, sc_timer_wdog_action_t action);
+
+/* @} */
+
+/*!
+ * @name Real-Time Clock (RTC) Functions
+ * @{
+ */
+
+/*!
+ * This function sets the RTC time. Only the owner of the SC_R_SYSTEM
+ * resource can set the time.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     year        year (min 1970)
+ * @param[in]     mon         month (1-12)
+ * @param[in]     day         day of the month (1-31)
+ * @param[in]     hour        hour (0-23)
+ * @param[in]     min         minute (0-59)
+ * @param[in]     sec         second (0-59)
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ *
+ * Return errors:
+ * - SC_ERR_PARM if invalid time/date parameters,
+ * - SC_ERR_NOACCESS if caller's partition is not the SYSTEM owner
+ */
+sc_err_t sc_timer_set_rtc_time(sc_ipc_t ipc, uint16_t year, uint8_t mon,
+			       uint8_t day, uint8_t hour, uint8_t min,
+			       uint8_t sec);
+
+/*!
+ * This function gets the RTC time.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[out]    year        pointer to return year (min 1970)
+ * @param[out]    mon         pointer to return month (1-12)
+ * @param[out]    day         pointer to return day of the month (1-31)
+ * @param[out]    hour        pointer to return hour (0-23)
+ * @param[out]    min         pointer to return minute (0-59)
+ * @param[out]    sec         pointer to return second (0-59)
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ */
+sc_err_t sc_timer_get_rtc_time(sc_ipc_t ipc, uint16_t *year, uint8_t *mon,
+			       uint8_t *day, uint8_t *hour, uint8_t *min,
+			       uint8_t *sec);
+
+/*!
+ * This function gets the RTC time in seconds since 1/1/1970.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[out]    sec         pointer to return second
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ */
+sc_err_t sc_timer_get_rtc_sec1970(sc_ipc_t ipc, uint32_t *sec);
+
+/*!
+ * This function sets the RTC alarm.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     year        year (min 1970)
+ * @param[in]     mon         month (1-12)
+ * @param[in]     day         day of the month (1-31)
+ * @param[in]     hour        hour (0-23)
+ * @param[in]     min         minute (0-59)
+ * @param[in]     sec         second (0-59)
+ *
+ * Note this alarm setting clears when the alarm is triggered.
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ *
+ * Return errors:
+ * - SC_ERR_PARM if invalid time/date parameters
+ */
+sc_err_t sc_timer_set_rtc_alarm(sc_ipc_t ipc, uint16_t year, uint8_t mon,
+				uint8_t day, uint8_t hour, uint8_t min,
+				uint8_t sec);
+
+/*!
+ * This function sets the RTC alarm (periodic mode).
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     sec         period in seconds
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ *
+ * Return errors:
+ * - SC_ERR_PARM if invalid time/date parameters
+ */
+sc_err_t sc_timer_set_rtc_periodic_alarm(sc_ipc_t ipc, uint32_t sec);
+
+/*!
+ * This function cancels the RTC alarm.
+ *
+ * @param[in]     ipc         IPC handle
+ *
+ * Note this alarm setting clears when the alarm is triggered.
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ *
+ * Return errors:
+ * - SC_ERR_PARM if invalid time/date parameters
+ */
+sc_err_t sc_timer_cancel_rtc_alarm(sc_ipc_t ipc);
+
+/*!
+ * This function sets the RTC calibration value. Only the owner of the SC_R_SYSTEM
+ * resource can set the calibration.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     count       calibration count (-16 to 15)
+ *
+ * The calibration value is a 5-bit value including the sign bit, which is
+ * implemented in 2's complement. It is added or subtracted from the RTC on
+ * a periodic basis, once per 32768 cycles of the RTC clock.
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ */
+sc_err_t sc_timer_set_rtc_calb(sc_ipc_t ipc, int8_t count);
+
+/* @} */
+
+/*!
+ * @name System Counter (SYSCTR) Functions
+ * @{
+ */
+
+/*!
+ * This function sets the SYSCTR alarm.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     ticks       number of 8MHz cycles
+ *
+ * Note this alarm setting clears when the alarm is triggered.
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ *
+ * Return errors:
+ * - SC_ERR_PARM if invalid time/date parameters
+ */
+sc_err_t sc_timer_set_sysctr_alarm(sc_ipc_t ipc, uint64_t ticks);
+
+/*!
+ * This function sets the SYSCTR alarm (periodic mode).
+ *
+ * @param[in]     ipc          IPC handle
+ * @param[in]     ticks        number of 8MHz cycles
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ *
+ * Return errors:
+ * - SC_ERR_PARM if invalid time/date parameters
+ */
+sc_err_t sc_timer_set_sysctr_periodic_alarm(sc_ipc_t ipc, uint64_t ticks);
+
+/*!
+ * This function cancels the SYSCTR alarm.
+ *
+ * @param[in]     ipc         IPC handle
+ *
+ * Note this alarm setting clears when the alarm is triggered.
+ *
+ * @return Returns an error code (SC_ERR_NONE = success).
+ *
+ * Return errors:
+ * - SC_ERR_PARM if invalid time/date parameters
+ */
+sc_err_t sc_timer_cancel_sysctr_alarm(sc_ipc_t ipc);
+
+/* @} */
+
+#endif				/* SC_TIMER_API_H */
+
+/**@}*/
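A minimal usage sketch for the watchdog half of this API, assuming the caller already holds a valid IPC handle (such as the global ipc_handle used by imx_sip_handler.c above) and that SC_TRUE is provided by sci_types.h; error handling is omitted:

#include <sci/sci.h>

static void example_start_wdog(sc_ipc_t ipc, sc_rm_pt_t pt)
{
	/* 60 s timeout, warm-reset the system when partition 'pt' expires. */
	(void)sc_timer_set_wdog_timeout(ipc, 60000U);
	(void)sc_timer_set_wdog_action(ipc, pt, SC_TIMER_WDOG_ACTION_WARM);

	/* Start and lock the configuration against later changes. */
	(void)sc_timer_start_wdog(ipc, SC_TRUE);
}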
diff --git a/plat/imx/common/sci/sci_api.mk b/plat/imx/common/sci/sci_api.mk
index b277877..edaef4d 100644
--- a/plat/imx/common/sci/sci_api.mk
+++ b/plat/imx/common/sci/sci_api.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -8,4 +8,5 @@
 			plat/imx/common/sci/imx8_mu.c			\
 			plat/imx/common/sci/svc/pad/pad_rpc_clnt.c	\
 			plat/imx/common/sci/svc/pm/pm_rpc_clnt.c	\
-			plat/imx/common/sci/svc/rm/rm_rpc_clnt.c
+			plat/imx/common/sci/svc/rm/rm_rpc_clnt.c	\
+			plat/imx/common/sci/svc/timer/timer_rpc_clnt.c
diff --git a/plat/imx/common/sci/svc/timer/sci_timer_rpc.h b/plat/imx/common/sci/svc/timer/sci_timer_rpc.h
new file mode 100644
index 0000000..6716399
--- /dev/null
+++ b/plat/imx/common/sci/svc/timer/sci_timer_rpc.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*!
+ * Header file for the TIMER RPC implementation.
+ *
+ * @addtogroup TIMER_SVC
+ * @{
+ */
+
+#ifndef SC_TIMER_RPC_H
+#define SC_TIMER_RPC_H
+
+/* Includes */
+
+/* Defines */
+
+/*!
+ * @name Defines for RPC TIMER function calls
+ */
+/*@{*/
+#define TIMER_FUNC_UNKNOWN 0	/* Unknown function */
+#define TIMER_FUNC_SET_WDOG_TIMEOUT 1U	/* Index for timer_set_wdog_timeout() RPC call */
+#define TIMER_FUNC_SET_WDOG_PRE_TIMEOUT 12U	/* Index for timer_set_wdog_pre_timeout() RPC call */
+#define TIMER_FUNC_START_WDOG 2U	/* Index for timer_start_wdog() RPC call */
+#define TIMER_FUNC_STOP_WDOG 3U	/* Index for timer_stop_wdog() RPC call */
+#define TIMER_FUNC_PING_WDOG 4U	/* Index for timer_ping_wdog() RPC call */
+#define TIMER_FUNC_GET_WDOG_STATUS 5U	/* Index for timer_get_wdog_status() RPC call */
+#define TIMER_FUNC_PT_GET_WDOG_STATUS 13U	/* Index for timer_pt_get_wdog_status() RPC call */
+#define TIMER_FUNC_SET_WDOG_ACTION 10U	/* Index for timer_set_wdog_action() RPC call */
+#define TIMER_FUNC_SET_RTC_TIME 6U	/* Index for timer_set_rtc_time() RPC call */
+#define TIMER_FUNC_GET_RTC_TIME 7U	/* Index for timer_get_rtc_time() RPC call */
+#define TIMER_FUNC_GET_RTC_SEC1970 9U	/* Index for timer_get_rtc_sec1970() RPC call */
+#define TIMER_FUNC_SET_RTC_ALARM 8U	/* Index for timer_set_rtc_alarm() RPC call */
+#define TIMER_FUNC_SET_RTC_PERIODIC_ALARM 14U	/* Index for timer_set_rtc_periodic_alarm() RPC call */
+#define TIMER_FUNC_CANCEL_RTC_ALARM 15U	/* Index for timer_cancel_rtc_alarm() RPC call */
+#define TIMER_FUNC_SET_RTC_CALB 11U	/* Index for timer_set_rtc_calb() RPC call */
+#define TIMER_FUNC_SET_SYSCTR_ALARM 16U	/* Index for timer_set_sysctr_alarm() RPC call */
+#define TIMER_FUNC_SET_SYSCTR_PERIODIC_ALARM 17U	/* Index for timer_set_sysctr_periodic_alarm() RPC call */
+#define TIMER_FUNC_CANCEL_SYSCTR_ALARM 18U	/* Index for timer_cancel_sysctr_alarm() RPC call */
+/*@}*/
+
+/* Types */
+
+/* Functions */
+
+/*!
+ * This function dispatches an incoming TIMER RPC request.
+ *
+ * @param[in]     caller_pt   caller partition
+ * @param[in]     msg         pointer to RPC message
+ */
+void timer_dispatch(sc_rm_pt_t caller_pt, sc_rpc_msg_t *msg);
+
+/*!
+ * This function translates and dispatches a TIMER RPC request.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     msg         pointer to RPC message
+ */
+void timer_xlate(sc_ipc_t ipc, sc_rpc_msg_t *msg);
+
+#endif				/* SC_TIMER_RPC_H */
+
+/**@}*/
diff --git a/plat/imx/common/sci/svc/timer/timer_rpc_clnt.c b/plat/imx/common/sci/svc/timer/timer_rpc_clnt.c
new file mode 100644
index 0000000..a82be96
--- /dev/null
+++ b/plat/imx/common/sci/svc/timer/timer_rpc_clnt.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2019 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*!
+ * File containing client-side RPC functions for the TIMER service. These
+ * functions are ported to clients that communicate with the SC.
+ *
+ * @addtogroup TIMER_SVC
+ * @{
+ */
+
+/* Includes */
+
+#include <sci/sci_types.h>
+#include <sci/svc/rm/sci_rm_api.h>
+#include <sci/svc/timer/sci_timer_api.h>
+#include <sci/sci_rpc.h>
+#include <stdlib.h>
+#include "sci_timer_rpc.h"
+
+/* Local Defines */
+
+/* Local Types */
+
+/* Local Functions */
+
+sc_err_t sc_timer_set_wdog_timeout(sc_ipc_t ipc, sc_timer_wdog_time_t timeout)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_SET_WDOG_TIMEOUT;
+	RPC_U32(&msg, 0U) = (uint32_t)timeout;
+	RPC_SIZE(&msg) = 2U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_set_wdog_pre_timeout(sc_ipc_t ipc,
+				       sc_timer_wdog_time_t pre_timeout)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_SET_WDOG_PRE_TIMEOUT;
+	RPC_U32(&msg, 0U) = (uint32_t)pre_timeout;
+	RPC_SIZE(&msg) = 2U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_start_wdog(sc_ipc_t ipc, sc_bool_t lock)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_START_WDOG;
+	RPC_U8(&msg, 0U) = (uint8_t)lock;
+	RPC_SIZE(&msg) = 2U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_stop_wdog(sc_ipc_t ipc)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_STOP_WDOG;
+	RPC_SIZE(&msg) = 1U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_ping_wdog(sc_ipc_t ipc)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_PING_WDOG;
+	RPC_SIZE(&msg) = 1U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_get_wdog_status(sc_ipc_t ipc,
+				  sc_timer_wdog_time_t *timeout,
+				  sc_timer_wdog_time_t *max_timeout,
+				  sc_timer_wdog_time_t *remaining_time)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_GET_WDOG_STATUS;
+	RPC_SIZE(&msg) = 1U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	if (timeout != NULL)
+		*timeout = RPC_U32(&msg, 0U);
+
+	if (max_timeout != NULL)
+		*max_timeout = RPC_U32(&msg, 4U);
+
+	if (remaining_time != NULL)
+		*remaining_time = RPC_U32(&msg, 8U);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_pt_get_wdog_status(sc_ipc_t ipc, sc_rm_pt_t pt,
+				     sc_bool_t *enb,
+				     sc_timer_wdog_time_t *timeout,
+				     sc_timer_wdog_time_t *remaining_time)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_PT_GET_WDOG_STATUS;
+	RPC_U8(&msg, 0U) = (uint8_t)pt;
+	RPC_SIZE(&msg) = 2U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	if (timeout != NULL)
+		*timeout = RPC_U32(&msg, 0U);
+
+	if (remaining_time != NULL)
+		*remaining_time = RPC_U32(&msg, 4U);
+
+	result = RPC_R8(&msg);
+	if (enb != NULL)
+		*enb = RPC_U8(&msg, 8U);
+
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_set_wdog_action(sc_ipc_t ipc,
+				  sc_rm_pt_t pt, sc_timer_wdog_action_t action)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_SET_WDOG_ACTION;
+	RPC_U8(&msg, 0U) = (uint8_t)pt;
+	RPC_U8(&msg, 1U) = (uint8_t)action;
+	RPC_SIZE(&msg) = 2U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_set_rtc_time(sc_ipc_t ipc, uint16_t year, uint8_t mon,
+			       uint8_t day, uint8_t hour, uint8_t min,
+			       uint8_t sec)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_SET_RTC_TIME;
+	RPC_U16(&msg, 0U) = (uint16_t)year;
+	RPC_U8(&msg, 2U) = (uint8_t)mon;
+	RPC_U8(&msg, 3U) = (uint8_t)day;
+	RPC_U8(&msg, 4U) = (uint8_t)hour;
+	RPC_U8(&msg, 5U) = (uint8_t)min;
+	RPC_U8(&msg, 6U) = (uint8_t)sec;
+	RPC_SIZE(&msg) = 3U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_get_rtc_time(sc_ipc_t ipc, uint16_t *year, uint8_t *mon,
+			       uint8_t *day, uint8_t *hour, uint8_t *min,
+			       uint8_t *sec)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_GET_RTC_TIME;
+	RPC_SIZE(&msg) = 1U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	if (year != NULL)
+		*year = RPC_U16(&msg, 0U);
+
+	result = RPC_R8(&msg);
+	if (mon != NULL)
+		*mon = RPC_U8(&msg, 2U);
+
+	if (day != NULL)
+		*day = RPC_U8(&msg, 3U);
+
+	if (hour != NULL)
+		*hour = RPC_U8(&msg, 4U);
+
+	if (min != NULL)
+		*min = RPC_U8(&msg, 5U);
+
+	if (sec != NULL)
+		*sec = RPC_U8(&msg, 6U);
+
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_get_rtc_sec1970(sc_ipc_t ipc, uint32_t *sec)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_GET_RTC_SEC1970;
+	RPC_SIZE(&msg) = 1U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	if (sec != NULL)
+		*sec = RPC_U32(&msg, 0U);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_set_rtc_alarm(sc_ipc_t ipc, uint16_t year, uint8_t mon,
+				uint8_t day, uint8_t hour, uint8_t min,
+				uint8_t sec)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_SET_RTC_ALARM;
+	RPC_U16(&msg, 0U) = (uint16_t)year;
+	RPC_U8(&msg, 2U) = (uint8_t)mon;
+	RPC_U8(&msg, 3U) = (uint8_t)day;
+	RPC_U8(&msg, 4U) = (uint8_t)hour;
+	RPC_U8(&msg, 5U) = (uint8_t)min;
+	RPC_U8(&msg, 6U) = (uint8_t)sec;
+	RPC_SIZE(&msg) = 3U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_set_rtc_periodic_alarm(sc_ipc_t ipc, uint32_t sec)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_SET_RTC_PERIODIC_ALARM;
+	RPC_U32(&msg, 0U) = (uint32_t)sec;
+	RPC_SIZE(&msg) = 2U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_cancel_rtc_alarm(sc_ipc_t ipc)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_CANCEL_RTC_ALARM;
+	RPC_SIZE(&msg) = 1U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_set_rtc_calb(sc_ipc_t ipc, int8_t count)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_SET_RTC_CALB;
+	RPC_I8(&msg, 0U) = (int8_t) count;
+	RPC_SIZE(&msg) = 2U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_set_sysctr_alarm(sc_ipc_t ipc, uint64_t ticks)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_SET_SYSCTR_ALARM;
+	RPC_U32(&msg, 0U) = (uint32_t)(ticks >> 32U);
+	RPC_U32(&msg, 4U) = (uint32_t)ticks;
+	RPC_SIZE(&msg) = 3U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_set_sysctr_periodic_alarm(sc_ipc_t ipc, uint64_t ticks)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_SET_SYSCTR_PERIODIC_ALARM;
+	RPC_U32(&msg, 0U) = (uint32_t)(ticks >> 32U);
+	RPC_U32(&msg, 4U) = (uint32_t)ticks;
+	RPC_SIZE(&msg) = 3U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+sc_err_t sc_timer_cancel_sysctr_alarm(sc_ipc_t ipc)
+{
+	sc_rpc_msg_t msg;
+	uint8_t result;
+
+	RPC_VER(&msg) = SC_RPC_VERSION;
+	RPC_SVC(&msg) = (uint8_t)SC_RPC_SVC_TIMER;
+	RPC_FUNC(&msg) = (uint8_t)TIMER_FUNC_CANCEL_SYSCTR_ALARM;
+	RPC_SIZE(&msg) = 1U;
+
+	sc_call_rpc(ipc, &msg, SC_FALSE);
+
+	result = RPC_R8(&msg);
+	return (sc_err_t)result;
+}
+
+/**@}*/
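All of the wrappers above follow the same marshalling convention: RPC_SIZE counts 32-bit words including the header word, payload fields sit at fixed byte offsets accessed through RPC_U*(), and 64-bit values are split high word first. As a reading aid, the sc_timer_set_rtc_time() payload built above (RPC_SIZE = 3U, i.e. header plus two payload words) laid out as a struct; this type is illustrative only and is not used by the code:

#include <stdint.h>

struct rtc_time_payload_sketch {
	uint16_t year;	/* RPC_U16(&msg, 0U) */
	uint8_t  mon;	/* RPC_U8(&msg, 2U)  */
	uint8_t  day;	/* RPC_U8(&msg, 3U)  */
	uint8_t  hour;	/* RPC_U8(&msg, 4U)  */
	uint8_t  min;	/* RPC_U8(&msg, 5U)  */
	uint8_t  sec;	/* RPC_U8(&msg, 6U)  */
	/* One byte of padding completes the second 32-bit word. */
};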
diff --git a/plat/imx/imx8qm/platform.mk b/plat/imx/imx8qm/platform.mk
index dc45e90..9113d33 100644
--- a/plat/imx/imx8qm/platform.mk
+++ b/plat/imx/imx8qm/platform.mk
@@ -22,6 +22,8 @@
 				plat/imx/imx8qm/imx8qm_psci.c		\
 				plat/imx/common/imx8_topology.c		\
 				plat/imx/common/imx8_psci.c		\
+				plat/imx/common/imx_sip_svc.c		\
+				plat/imx/common/imx_sip_handler.c	\
 				lib/xlat_tables/aarch64/xlat_tables.c		\
 				lib/xlat_tables/xlat_tables_common.c		\
 				lib/cpus/aarch64/cortex_a53.S			\
diff --git a/plat/imx/imx8qx/platform.mk b/plat/imx/imx8qx/platform.mk
index a831bf2..eeb1f34 100644
--- a/plat/imx/imx8qx/platform.mk
+++ b/plat/imx/imx8qx/platform.mk
@@ -21,6 +21,8 @@
 				plat/imx/imx8qx/imx8qx_psci.c		\
 				plat/imx/common/imx8_topology.c		\
 				plat/imx/common/imx8_psci.c		\
+				plat/imx/common/imx_sip_svc.c		\
+				plat/imx/common/imx_sip_handler.c	\
 				plat/common/plat_psci_common.c		\
 				lib/xlat_tables/xlat_tables_common.c	\
 				lib/xlat_tables/aarch64/xlat_tables.c	\
diff --git a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S
index 0476ba8..2bf9a22 100644
--- a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S
+++ b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S
@@ -11,6 +11,7 @@
 #include <cortex_a57.h>
 #include <platform_def.h>
 #include <tegra_def.h>
+#include <tegra_platform.h>
 
 #define MIDR_PN_CORTEX_A57		0xD07
 
@@ -45,7 +46,6 @@
 	.globl	ns_image_entrypoint
 	.globl	tegra_bl31_phys_base
 	.globl	tegra_console_base
-	.globl	tegra_enable_l2_ecc_parity_prot
 
 	/* ---------------------
 	 * Common CPU init code
@@ -92,20 +92,6 @@
 	msr	actlr_el2, x0
 	isb
 
-	/* -------------------------------------------------------
-	 * Enable L2 ECC and Parity Protection
-	 * -------------------------------------------------------
-	 */
-	adr	x0, tegra_enable_l2_ecc_parity_prot
-	ldr	x0, [x0]
-	cbz	x0, 1f
-	mrs	x0, CORTEX_A57_L2CTLR_EL1
-	and	x1, x0, #CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT
-	cbnz	x1, 1f
-	orr	x0, x0, #CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT
-	msr	CORTEX_A57_L2CTLR_EL1, x0
-	isb
-
 	/* --------------------------------
 	 * Enable the cycle count register
 	 * --------------------------------
@@ -326,6 +312,23 @@
 
 #if ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT
 
+	/* --------------------------------------------------------
+	 * Skip the invalidate BTB workaround for Tegra210B01 SKUs.
+	 * --------------------------------------------------------
+	 */
+	mov	x0, #TEGRA_MISC_BASE
+	add	x0, x0, #HARDWARE_REVISION_OFFSET
+	ldr	w1, [x0]
+	lsr	w1, w1, #CHIP_ID_SHIFT
+	and	w1, w1, #CHIP_ID_MASK
+	cmp	w1, #TEGRA_CHIPID_TEGRA21	/* T210? */
+	b.ne	2f
+	ldr	w1, [x0]
+	lsr	w1, w1, #MAJOR_VERSION_SHIFT
+	and	w1, w1, #MAJOR_VERSION_MASK
+	cmp	w1, #0x02			/* T210 B01? */
+	b.eq	2f
+
 	/* -------------------------------------------------------
 	 * Invalidate BTB along with I$ to remove any stale
 	 * entries from the branch predictor array.
@@ -382,7 +385,7 @@
 	.rept	65
 	nop
 	.endr
-
+2:
 	/* --------------------------------------------------
 	 * Do not insert instructions here
 	 * --------------------------------------------------
@@ -460,10 +463,3 @@
 	 */
 tegra_console_base:
 	.quad	0
-
-	/* --------------------------------------------------
-	 * Enable L2 ECC and Parity Protection
-	 * --------------------------------------------------
-	 */
-tegra_enable_l2_ecc_parity_prot:
-	.quad	0
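For readability, the chip-revision check added to the BTB workaround above can be read as the following C (a transliteration only; the real check must stay in assembly because it runs in the reset path before the C runtime exists). It relies on the same mmio helper and tegra_def/tegra_platform macros as the assembly:

#include <stdbool.h>
#include <stdint.h>
#include <lib/mmio.h>

static bool skip_btb_workaround(void)
{
	uint32_t rev = mmio_read_32(TEGRA_MISC_BASE + HARDWARE_REVISION_OFFSET);
	uint32_t chip_id = (rev >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
	uint32_t major = (rev >> MAJOR_VERSION_SHIFT) & MAJOR_VERSION_MASK;

	/* Only pre-B01 Tegra210 parts need the BTB invalidate. */
	return (chip_id != TEGRA_CHIPID_TEGRA21) || (major == 0x02U);
}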
diff --git a/plat/nvidia/tegra/common/drivers/bpmp/bpmp.c b/plat/nvidia/tegra/common/drivers/bpmp/bpmp.c
new file mode 100644
index 0000000..96e3667
--- /dev/null
+++ b/plat/nvidia/tegra/common/drivers/bpmp/bpmp.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bpmp.h>
+#include <common/debug.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <mmio.h>
+#include <platform.h>
+#include <stdbool.h>
+#include <string.h>
+#include <tegra_def.h>
+
+#define BPMP_TIMEOUT_10US	10
+
+static uint32_t channel_base[NR_CHANNELS];
+static uint32_t bpmp_init_state = BPMP_INIT_PENDING;
+
+static uint32_t channel_field(unsigned int ch)
+{
+	return mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET) & CH_MASK(ch);
+}
+
+static bool master_free(unsigned int ch)
+{
+	return channel_field(ch) == MA_FREE(ch);
+}
+
+static bool master_acked(unsigned int ch)
+{
+	return channel_field(ch) == MA_ACKD(ch);
+}
+
+static void signal_slave(unsigned int ch)
+{
+	mmio_write_32(TEGRA_RES_SEMA_BASE + CLR_OFFSET, CH_MASK(ch));
+}
+
+static void free_master(unsigned int ch)
+{
+	mmio_write_32(TEGRA_RES_SEMA_BASE + CLR_OFFSET,
+		      MA_ACKD(ch) ^ MA_FREE(ch));
+}
+
+/* should be called with local irqs disabled */
+int32_t tegra_bpmp_send_receive_atomic(int mrq, const void *ob_data, int ob_sz,
+		void *ib_data, int ib_sz)
+{
+	unsigned int ch = (unsigned int)plat_my_core_pos();
+	mb_data_t *p = (mb_data_t *)(uintptr_t)channel_base[ch];
+	int32_t ret = -ETIMEDOUT, timeout = 0;
+
+	if (bpmp_init_state == BPMP_INIT_COMPLETE) {
+
+		/* loop until BPMP is free */
+		for (timeout = 0; timeout < BPMP_TIMEOUT_10US; timeout++) {
+			if (master_free(ch) == true) {
+				break;
+			}
+
+			udelay(1);
+		}
+
+		if (timeout != BPMP_TIMEOUT_10US) {
+
+			/* generate the command struct */
+			p->code = mrq;
+			p->flags = DO_ACK;
+			(void)memcpy((void *)p->data, ob_data, (size_t)ob_sz);
+
+			/* signal command ready to the BPMP */
+			signal_slave(ch);
+			mmio_write_32(TEGRA_PRI_ICTLR_BASE + CPU_IEP_FIR_SET,
+				      (1UL << INT_SHR_SEM_OUTBOX_FULL));
+
+			/* loop until the command is executed */
+			for (timeout = 0; timeout < BPMP_TIMEOUT_10US; timeout++) {
+				if (master_acked(ch) == true) {
+					break;
+				}
+
+				udelay(1);
+			}
+
+			if (timeout != BPMP_TIMEOUT_10US) {
+
+				/* get the command response */
+				(void)memcpy(ib_data, (const void *)p->data,
+					     (size_t)ib_sz);
+
+				/* return error code */
+				ret = p->code;
+
+				/* free this channel */
+				free_master(ch);
+			}
+		}
+
+	} else {
+		/* return error code */
+		ret = -EINVAL;
+	}
+
+	if (timeout == BPMP_TIMEOUT_10US) {
+		ERROR("Timed out waiting for bpmp's response\n");
+	}
+
+	return ret;
+}
+
+int tegra_bpmp_init(void)
+{
+	uint32_t val, base;
+	unsigned int ch;
+	int ret = 0;
+
+	if (bpmp_init_state != BPMP_INIT_COMPLETE) {
+
+		/* check if the bpmp processor is alive. */
+		val = mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET);
+		if (val != SIGN_OF_LIFE) {
+			ERROR("BPMP processor not available\n");
+			ret = -ENOTSUP;
+		}
+
+		/* check if clock for the atomics block is enabled */
+		val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_ENB_V);
+		if ((val & CAR_ENABLE_ATOMICS) == 0) {
+			ERROR("Clock to the atomics block is disabled\n");
+		}
+
+		/* check if the atomics block is out of reset */
+		val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_CLR_V);
+		if ((val & CAR_ENABLE_ATOMICS) == CAR_ENABLE_ATOMICS) {
+			ERROR("Reset to the atomics block is asserted\n");
+		}
+
+		/* base address to get the result from Atomics */
+		base = TEGRA_ATOMICS_BASE + RESULT0_REG_OFFSET;
+
+		/* channel area is setup by BPMP before signaling handshake */
+		for (ch = 0; ch < NR_CHANNELS; ch++) {
+
+			/* issue command to get the channel base address */
+			mmio_write_32(base, (ch << TRIGGER_ID_SHIFT) |
+				      ATOMIC_CMD_GET);
+
+			/* get the base address for the channel */
+			channel_base[ch] = mmio_read_32(base);
+
+			/* increment result register offset */
+			base += 4UL;
+		}
+
+		/* mark state as "initialized" */
+		if (ret == 0)
+			bpmp_init_state = BPMP_INIT_COMPLETE;
+
+		/* the channel values have to be visible across all cpus */
+		flush_dcache_range((uint64_t)channel_base, sizeof(channel_base));
+		flush_dcache_range((uint64_t)&bpmp_init_state,
+				   sizeof(bpmp_init_state));
+
+		INFO("%s: done\n", __func__);
+	}
+
+	return ret;
+}
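A usage sketch for the driver above, assuming tegra_bpmp_init() has already succeeded during boot; EXAMPLE_MRQ is a placeholder request number, not a real MRQ defined by this patch:

#include <stdint.h>

#define EXAMPLE_MRQ	23

static int32_t example_bpmp_query(uint32_t arg)
{
	uint32_t reply = 0U;
	int32_t ret;

	/* Runs with local IRQs disabled; each wait loop polls for ~10 us. */
	ret = tegra_bpmp_send_receive_atomic(EXAMPLE_MRQ,
					     &arg, (int)sizeof(arg),
					     &reply, (int)sizeof(reply));
	if (ret != 0) {
		return ret;	/* -ETIMEDOUT, -EINVAL or a BPMP error code */
	}

	return (int32_t)reply;
}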
diff --git a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c
index 55b9152..de431b7 100644
--- a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c
+++ b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c
@@ -87,6 +87,9 @@
 	 * strongly ordered MSS clients. ROC needs to be single point
 	 * of control on overriding the memory type. So, remove TSA's
 	 * memtype override.
+	 *
+	 * MC clients with default SO_DEV override still enabled at TSA:
+	 * AONW, BPMPW, SCEW, APEW
 	 */
 #if ENABLE_AFI_DEVICE
 	mc_set_tsa_passthrough(AFIW);
@@ -106,63 +109,121 @@
 	mc_set_tsa_passthrough(AONDMAW);
 	mc_set_tsa_passthrough(SCEDMAW);
 
-	/*
-	 * Change COH_PATH_OVERRIDE_SO_DEV from NO_OVERRIDE -> FORCE_COHERENT
-	 * for boot and strongly ordered MSS clients. This steers all sodev
-	 * transactions to ROC.
+	/* Parker has no IO Coherency support and needs the following:
+	 * Ordered MC Clients on Parker are AFI, EQOS, SATA, XUSB.
+	 * ISO clients(DISP, VI, EQOS) should never snoop caches and
+	 *     don't need ROC/PCFIFO ordering.
+	 * ISO clients(EQOS) that need ordering should use PCFIFO ordering
+	 *     and bypass ROC ordering by using FORCE_NON_COHERENT path.
+	 * FORCE_NON_COHERENT/FORCE_COHERENT config take precedence
+	 *     over SMMU attributes.
+	 * Force all Normal memory transactions from ISO and non-ISO to be
+	 *     non-coherent(bypass ROC, avoid cache snoop to avoid perf hit).
+	 * Force the SO_DEV transactions from ordered ISO clients(EQOS) to
+	 *     non-coherent path and enable MC PCFIFO interlock for ordering.
+	 * Force the SO_DEV transactions from ordered non-ISO clients (PCIe,
+	 *     XUSB, SATA) to coherent so that the transactions are
+	 *     ordered by ROC.
+	 * PCFIFO ensures write ordering.
+	 * Read after Write ordering is maintained/enforced by MC clients.
+	 * Clients that need PCIe type write ordering must
+	 *     go through ROC ordering.
+	 * Ordering enable for Read clients is not necessary.
+	 * R5's and A9 would get necessary ordering from AXI and
+	 *     don't need ROC ordering enable:
+	 *     - MMIO ordering is through dev mapping and MMIO
+	 *       accesses bypass SMMU.
+	 *     - Normal memory is accessed through SMMU and ordering is
+	 *       ensured by client and AXI.
+	 *     - Ack point for Normal memory is WCAM in MC.
+	 *     - MMIO's can be early acked and AXI ensures dev memory ordering,
+	 *       Client ensures read/write direction change ordering.
+	 *     - See Bug 200312466 for more details.
 	 *
-	 * Change AXID_OVERRIDE/AXID_OVERRIDE_SO_DEV only for some clients
-	 * whose AXI IDs we know and trust.
+	 * CGID_TAG_ADR is only present from T186 A02. As this code is common
+	 *    between A01 and A02, tegra_memctrl_set_overrides() programs
+	 *    CGID_TAG_ADR for the necessary clients on A02.
 	 */
-
-#if ENABLE_AFI_DEVICE
-	/* Match AFIW */
-	mc_set_forced_coherent_so_dev_cfg(AFIR);
-#endif
-
+	mc_set_txn_override(HDAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(BPMPW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(PTCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(NVDISPLAYR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(EQOSW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(NVJPGSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(ISPRA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SDMMCWAA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(VICSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(MPCOREW, CGID_TAG_DEFAULT, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
+	mc_set_txn_override(GPUSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(AXISR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SCEDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SDMMCW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(EQOSR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	/* See bug 200131110 comment #35 */
+	mc_set_txn_override(APEDMAR, CGID_TAG_CLIENT_AXI_ID, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(NVENCSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SDMMCRAB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(VICSRD1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(BPMPDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(VIW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SDMMCRAA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(AXISW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(XUSB_DEVR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(UFSHCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(TSECSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(GPUSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SATAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(XUSB_HOSTW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
+	mc_set_txn_override(TSECSWRB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(GPUSRD2, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SCEDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(GPUSWR2, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(AONDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	/* See bug 200131110 comment #35 */
+	mc_set_txn_override(APEDMAW, CGID_TAG_CLIENT_AXI_ID, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(AONW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(HOST1XDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(ETRR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SESWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(NVJPGSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(NVDECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(TSECSRDB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(BPMPDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(APER, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(NVDECSRD1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(XUSB_HOSTR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(ISPWA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SESRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SCER, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(AONR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(MPCORER, CGID_TAG_DEFAULT, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
+	mc_set_txn_override(SDMMCWA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(HDAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(NVDECSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(UFSHCW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(AONDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SATAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
+	mc_set_txn_override(ETRW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(VICSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(NVENCSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	/* See bug 200131110 comment #35 */
+	mc_set_txn_override(AFIR, CGID_TAG_DEFAULT, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SDMMCWAB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SDMMCRA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(NVDISPLAYR1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(ISPWB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(BPMPR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(APEW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(SDMMCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+	mc_set_txn_override(XUSB_DEVW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
+	mc_set_txn_override(TSECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
 	/*
 	 * See bug 200131110 comment #35 - there are no normal requests
 	 * and AWID for SO/DEV requests is hardcoded in RTL for a
 	 * particular PCIE controller
 	 */
-#if ENABLE_AFI_DEVICE
-	mc_set_forced_coherent_so_dev_cfg(AFIW);
-#endif
-	mc_set_forced_coherent_cfg(HDAR);
-	mc_set_forced_coherent_cfg(HDAW);
-	mc_set_forced_coherent_cfg(SATAR);
-	mc_set_forced_coherent_cfg(SATAW);
-	mc_set_forced_coherent_cfg(XUSB_HOSTR);
-	mc_set_forced_coherent_cfg(XUSB_HOSTW);
-	mc_set_forced_coherent_cfg(XUSB_DEVR);
-	mc_set_forced_coherent_cfg(XUSB_DEVW);
-	mc_set_forced_coherent_cfg(SDMMCRAB);
-	mc_set_forced_coherent_cfg(SDMMCWAB);
-
-	/* Match APEDMAW */
-	mc_set_forced_coherent_axid_so_dev_cfg(APEDMAR);
-
-	/*
-	 * See bug 200131110 comment #35 - AWID for normal requests
-	 * is 0x80 and AWID for SO/DEV requests is 0x01
-	 */
-	mc_set_forced_coherent_axid_so_dev_cfg(APEDMAW);
-	mc_set_forced_coherent_cfg(SESRD);
-	mc_set_forced_coherent_cfg(SESWR);
-	mc_set_forced_coherent_cfg(ETRR);
-	mc_set_forced_coherent_cfg(ETRW);
-	mc_set_forced_coherent_cfg(AXISR);
-	mc_set_forced_coherent_cfg(AXISW);
-	mc_set_forced_coherent_cfg(EQOSR);
-	mc_set_forced_coherent_cfg(EQOSW);
-	mc_set_forced_coherent_cfg(UFSHCR);
-	mc_set_forced_coherent_cfg(UFSHCW);
-	mc_set_forced_coherent_cfg(BPMPDMAR);
-	mc_set_forced_coherent_cfg(BPMPDMAW);
-	mc_set_forced_coherent_cfg(AONDMAR);
-	mc_set_forced_coherent_cfg(AONDMAW);
-	mc_set_forced_coherent_cfg(SCEDMAR);
-	mc_set_forced_coherent_cfg(SCEDMAW);
+	mc_set_txn_override(AFIW, CGID_TAG_DEFAULT, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_COHERENT);
+	mc_set_txn_override(SCEW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
 
 	/*
 	 * At this point, ordering can occur at ROC. So, remove PCFIFO's
@@ -192,11 +253,12 @@
 		mc_set_pcfifo_unordered_boot_so_mss(4, SESWR) &
 		mc_set_pcfifo_unordered_boot_so_mss(4, ETRW) &
 		mc_set_pcfifo_unordered_boot_so_mss(4, AXISW) &
-		mc_set_pcfifo_unordered_boot_so_mss(4, EQOSW) &
 		mc_set_pcfifo_unordered_boot_so_mss(4, UFSHCW) &
 		mc_set_pcfifo_unordered_boot_so_mss(4, BPMPDMAW) &
 		mc_set_pcfifo_unordered_boot_so_mss(4, AONDMAW) &
 		mc_set_pcfifo_unordered_boot_so_mss(4, SCEDMAW);
+	/* EQOSW is the only client that has PCFIFO order enabled. */
+	val |= mc_set_pcfifo_ordered_boot_so_mss(4, EQOSW);
 	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG4, val);
 
 	val = MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL &
@@ -204,45 +266,6 @@
 	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG5, val);
 
 	/*
-	 * At this point, ordering can occur at ROC. SMMU need not
-	 * reorder any requests.
-	 *
-	 * Change SMMU_*_ORDERED_CLIENT from ORDERED -> UNORDERED
-	 * for boot and strongly ordered MSS clients
-	 */
-	val = MC_SMMU_CLIENT_CONFIG1_RESET_VAL &
-#if ENABLE_AFI_DEVICE
-		mc_set_smmu_unordered_boot_so_mss(1, AFIW) &
-#endif
-		mc_set_smmu_unordered_boot_so_mss(1, HDAW) &
-		mc_set_smmu_unordered_boot_so_mss(1, SATAW);
-	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG1, val);
-
-	val = MC_SMMU_CLIENT_CONFIG2_RESET_VAL &
-		mc_set_smmu_unordered_boot_so_mss(2, XUSB_HOSTW) &
-		mc_set_smmu_unordered_boot_so_mss(2, XUSB_DEVW);
-	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG2, val);
-
-	val = MC_SMMU_CLIENT_CONFIG3_RESET_VAL &
-		mc_set_smmu_unordered_boot_so_mss(3, SDMMCWAB);
-	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG3, val);
-
-	val = MC_SMMU_CLIENT_CONFIG4_RESET_VAL &
-		mc_set_smmu_unordered_boot_so_mss(4, SESWR) &
-		mc_set_smmu_unordered_boot_so_mss(4, ETRW) &
-		mc_set_smmu_unordered_boot_so_mss(4, AXISW) &
-		mc_set_smmu_unordered_boot_so_mss(4, EQOSW) &
-		mc_set_smmu_unordered_boot_so_mss(4, UFSHCW) &
-		mc_set_smmu_unordered_boot_so_mss(4, BPMPDMAW) &
-		mc_set_smmu_unordered_boot_so_mss(4, AONDMAW) &
-		mc_set_smmu_unordered_boot_so_mss(4, SCEDMAW);
-	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG4, val);
-
-	val = MC_SMMU_CLIENT_CONFIG5_RESET_VAL &
-		mc_set_smmu_unordered_boot_so_mss(5, APEDMAW);
-	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG5, val);
-
-	/*
 	 * Deassert HOTRESET FLUSH_ENABLE for boot and strongly ordered MSS
 	 * clients to allow memory traffic from all clients to start passing
 	 * through ROC
diff --git a/plat/nvidia/tegra/common/tegra_bl31_setup.c b/plat/nvidia/tegra/common/tegra_bl31_setup.c
index b496650..0806307 100644
--- a/plat/nvidia/tegra/common/tegra_bl31_setup.c
+++ b/plat/nvidia/tegra/common/tegra_bl31_setup.c
@@ -27,6 +27,7 @@
 
 #include <memctrl.h>
 #include <tegra_def.h>
+#include <tegra_platform.h>
 #include <tegra_private.h>
 
 /* length of Trusty's input parameters (in bytes) */
@@ -122,6 +123,7 @@
 	plat_params_from_bl2_t *plat_params = (plat_params_from_bl2_t *)arg1;
 	image_info_t bl32_img_info = { {0} };
 	uint64_t tzdram_start, tzdram_end, bl32_start, bl32_end;
+	uint32_t console_clock;
 
 	/*
 	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
@@ -155,6 +157,7 @@
 	plat_bl31_params_from_bl2.tzdram_base = plat_params->tzdram_base;
 	plat_bl31_params_from_bl2.tzdram_size = plat_params->tzdram_size;
 	plat_bl31_params_from_bl2.uart_id = plat_params->uart_id;
+	plat_bl31_params_from_bl2.l2_ecc_parity_prot_dis = plat_params->l2_ecc_parity_prot_dis;
 
 	/*
 	 * It is very important that we run either from TZDRAM or TZSRAM base.
@@ -165,6 +168,15 @@
 		panic();
 
 	/*
+	 * Reference clock used by the FPGAs is a lot slower.
+	 */
+	if (tegra_platform_is_fpga() == 1U) {
+		console_clock = TEGRA_BOOT_UART_CLK_13_MHZ;
+	} else {
+		console_clock = TEGRA_BOOT_UART_CLK_408_MHZ;
+	}
+
+	/*
 	 * Get the base address of the UART controller to be used for the
 	 * console
 	 */
@@ -174,8 +186,8 @@
 		/*
 		 * Configure the UART port to be used as the console
 		 */
-		console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ,
-			TEGRA_CONSOLE_BAUDRATE);
+		console_init(tegra_console_base, console_clock,
+			     TEGRA_CONSOLE_BAUDRATE);
 	}
 
 	/*
@@ -239,6 +251,11 @@
 	args->arg0 = bl32_mem_size;
 	args->arg1 = bl32_boot_params;
 	args->arg2 = TRUSTY_PARAMS_LEN_BYTES;
+
+	/* update EKS size */
+	if (args->arg4 != 0U) {
+		args->arg2 = args->arg4;
+	}
 }
 #endif
 
diff --git a/plat/nvidia/tegra/common/tegra_fiq_glue.c b/plat/nvidia/tegra/common/tegra_fiq_glue.c
index 0b663ce..9a43f76 100644
--- a/plat/nvidia/tegra/common/tegra_fiq_glue.c
+++ b/plat/nvidia/tegra/common/tegra_fiq_glue.c
@@ -41,6 +41,11 @@
 	uint32_t cpu = plat_my_core_pos();
 	uint32_t irq;
 
+	(void)id;
+	(void)flags;
+	(void)handle;
+	(void)cookie;
+
 	bakery_lock_get(&tegra_fiq_lock);
 
 	/*
diff --git a/plat/nvidia/tegra/common/tegra_platform.c b/plat/nvidia/tegra/common/tegra_platform.c
index 10edf92..72da126 100644
--- a/plat/nvidia/tegra/common/tegra_platform.c
+++ b/plat/nvidia/tegra/common/tegra_platform.c
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch_helpers.h>
+#include <assert.h>
 #include <lib/mmio.h>
-
 #include <tegra_def.h>
 #include <tegra_platform.h>
 #include <tegra_private.h>
@@ -19,35 +19,33 @@
 	TEGRA_PLATFORM_QT,
 	TEGRA_PLATFORM_FPGA,
 	TEGRA_PLATFORM_EMULATION,
+	TEGRA_PLATFORM_LINSIM,
+	TEGRA_PLATFORM_UNIT_FPGA,
+	TEGRA_PLATFORM_VIRT_DEV_KIT,
 	TEGRA_PLATFORM_MAX,
 } tegra_platform_t;
 
 /*******************************************************************************
  * Tegra macros defining all the SoC minor versions
  ******************************************************************************/
-#define TEGRA_MINOR_QT			0
-#define TEGRA_MINOR_FPGA		1
-#define TEGRA_MINOR_EMULATION_MIN	2
-#define TEGRA_MINOR_EMULATION_MAX	10
-
-/*******************************************************************************
- * Tegra major, minor version helper macros
- ******************************************************************************/
-#define MAJOR_VERSION_SHIFT		0x4
-#define MAJOR_VERSION_MASK		0xF
-#define MINOR_VERSION_SHIFT		0x10
-#define MINOR_VERSION_MASK		0xF
-#define CHIP_ID_SHIFT			8
-#define CHIP_ID_MASK			0xFF
+#define TEGRA_MINOR_QT			U(0)
+#define TEGRA_MINOR_FPGA		U(1)
+#define TEGRA_MINOR_ASIM_QT		U(2)
+#define TEGRA_MINOR_ASIM_LINSIM		U(3)
+#define TEGRA_MINOR_DSIM_ASIM_LINSIM	U(4)
+#define TEGRA_MINOR_UNIT_FPGA		U(5)
+#define TEGRA_MINOR_VIRT_DEV_KIT	U(6)
 
 /*******************************************************************************
- * Tegra chip ID values
+ * Tegra macros defining all the SoC pre_si_platform
  ******************************************************************************/
-typedef enum tegra_chipid {
-	TEGRA_CHIPID_TEGRA13 = 0x13,
-	TEGRA_CHIPID_TEGRA21 = 0x21,
-	TEGRA_CHIPID_TEGRA18 = 0x18,
-} tegra_chipid_t;
+#define TEGRA_PRE_SI_QT			U(1)
+#define TEGRA_PRE_SI_FPGA		U(2)
+#define TEGRA_PRE_SI_UNIT_FPGA		U(3)
+#define TEGRA_PRE_SI_ASIM_QT		U(4)
+#define TEGRA_PRE_SI_ASIM_LINSIM	U(5)
+#define TEGRA_PRE_SI_DSIM_ASIM_LINSIM	U(6)
+#define TEGRA_PRE_SI_VDK		U(8)
 
 /*
  * Read the chip ID value
@@ -73,80 +71,191 @@
 	return (tegra_get_chipid() >> MINOR_VERSION_SHIFT) & MINOR_VERSION_MASK;
 }
 
-uint8_t tegra_chipid_is_t132(void)
+/*
+ * Read the chip's pre_si_platform value from the chip ID value
+ */
+static uint32_t tegra_get_chipid_pre_si_platform(void)
 {
-	uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+	return (tegra_get_chipid() >> PRE_SI_PLATFORM_SHIFT) & PRE_SI_PLATFORM_MASK;
+}
 
-	return (chip_id == TEGRA_CHIPID_TEGRA13);
+bool tegra_chipid_is_t132(void)
+{
+	uint32_t chip_id = ((tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK);
+
+	return (chip_id == (uint32_t)TEGRA_CHIPID_TEGRA13);
 }
 
-uint8_t tegra_chipid_is_t210(void)
+bool tegra_chipid_is_t186(void)
 {
 	uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
 
-	return (chip_id == TEGRA_CHIPID_TEGRA21);
+	return (chip_id == TEGRA_CHIPID_TEGRA18);
 }
 
-uint8_t tegra_chipid_is_t186(void)
+bool tegra_chipid_is_t210(void)
 {
 	uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
 
-	return (chip_id == TEGRA_CHIPID_TEGRA18);
+	return (chip_id == (uint32_t)TEGRA_CHIPID_TEGRA21);
 }
 
+bool tegra_chipid_is_t210_b01(void)
+{
+	return (tegra_chipid_is_t210() && (tegra_get_chipid_major() == 0x2UL));
+}
+
 /*
  * Read the chip ID value and derive the platform
  */
 static tegra_platform_t tegra_get_platform(void)
 {
-	uint32_t major = tegra_get_chipid_major();
-	uint32_t minor = tegra_get_chipid_minor();
+	uint32_t major, minor, pre_si_platform;
+	tegra_platform_t ret;
 
-	/* Actual silicon platforms have a non-zero major version */
-	if (major > 0)
-		return TEGRA_PLATFORM_SILICON;
+	/* get the major/minor chip ID values */
+	major = tegra_get_chipid_major();
+	minor = tegra_get_chipid_minor();
+	pre_si_platform = tegra_get_chipid_pre_si_platform();
 
-	/*
-	 * The minor version number is used by simulation platforms
-	 */
+	if (major == 0U) {
+		/*
+		 * The minor version number is used by simulation platforms
+		 */
+		switch (minor) {
+		/*
+		 * Cadence's QuickTurn emulation system is a Solaris-based
+		 * chip emulation system
+		 */
+		case TEGRA_MINOR_QT:
+		case TEGRA_MINOR_ASIM_QT:
+			ret = TEGRA_PLATFORM_QT;
+			break;
 
-	/*
-	 * Cadence's QuickTurn emulation system is a Solaris-based
-	 * chip emulation system
-	 */
-	if (minor == TEGRA_MINOR_QT)
-		return TEGRA_PLATFORM_QT;
+		/*
+		 * FPGAs are used during early software/hardware development
+		 */
+		case TEGRA_MINOR_FPGA:
+			ret = TEGRA_PLATFORM_FPGA;
+			break;
+		/*
+		 * Linsim is a reconfigurable, clock-driven, mixed RTL/cmodel
+		 * simulation framework.
+		 */
+		case TEGRA_MINOR_ASIM_LINSIM:
+		case TEGRA_MINOR_DSIM_ASIM_LINSIM:
+			ret = TEGRA_PLATFORM_LINSIM;
+			break;
 
-	/*
-	 * FPGAs are used during early software/hardware development
-	 */
-	if (minor == TEGRA_MINOR_FPGA)
-		return TEGRA_PLATFORM_FPGA;
+		/*
+		 * Unit FPGAs run the actual hardware block IP on the FPGA with
+		 * the other parts of the system using Linsim.
+		 */
+		case TEGRA_MINOR_UNIT_FPGA:
+			ret = TEGRA_PLATFORM_UNIT_FPGA;
+			break;
+		/*
+		 * The Virtualizer Development Kit (VDK) is the standard chip
+		 * development kit from Synopsys.
+		 */
+		case TEGRA_MINOR_VIRT_DEV_KIT:
+			ret = TEGRA_PLATFORM_VIRT_DEV_KIT;
+			break;
 
-	/* Minor version reserved for other emulation platforms */
-	if ((minor > TEGRA_MINOR_FPGA) && (minor <= TEGRA_MINOR_EMULATION_MAX))
-		return TEGRA_PLATFORM_EMULATION;
+		default:
+			ret = TEGRA_PLATFORM_MAX;
+			break;
+		}
 
-	/* unsupported platform */
-	return TEGRA_PLATFORM_MAX;
+	} else if (pre_si_platform > 0U) {
+
+		switch (pre_si_platform) {
+		/*
+		 * Cadence's QuickTurn emulation system is a Solaris-based
+		 * chip emulation system
+		 */
+		case TEGRA_PRE_SI_QT:
+		case TEGRA_PRE_SI_ASIM_QT:
+			ret = TEGRA_PLATFORM_QT;
+			break;
+
+		/*
+		 * FPGAs are used during early software/hardware development
+		 */
+		case TEGRA_PRE_SI_FPGA:
+			ret = TEGRA_PLATFORM_FPGA;
+			break;
+		/*
+		 * Linsim is a reconfigurable, clock-driven, mixed RTL/cmodel
+		 * simulation framework.
+		 */
+		case TEGRA_PRE_SI_ASIM_LINSIM:
+		case TEGRA_PRE_SI_DSIM_ASIM_LINSIM:
+			ret = TEGRA_PLATFORM_LINSIM;
+			break;
+
+		/*
+		 * Unit FPGAs run the actual hardware block IP on the FPGA with
+		 * the other parts of the system using Linsim.
+		 */
+		case TEGRA_PRE_SI_UNIT_FPGA:
+			ret = TEGRA_PLATFORM_UNIT_FPGA;
+			break;
+		/*
+		 * The Virtualizer Development Kit (VDK) is the standard chip
+		 * development kit from Synopsys.
+		 */
+		case TEGRA_PRE_SI_VDK:
+			ret = TEGRA_PLATFORM_VIRT_DEV_KIT;
+			break;
+
+		default:
+			ret = TEGRA_PLATFORM_MAX;
+			break;
+		}
+
+	} else {
+		/* Actual silicon platforms have a non-zero major version */
+		ret = TEGRA_PLATFORM_SILICON;
+	}
+
+	return ret;
 }
 
-uint8_t tegra_platform_is_silicon(void)
+bool tegra_platform_is_silicon(void)
 {
-	return (tegra_get_platform() == TEGRA_PLATFORM_SILICON);
+	return ((tegra_get_platform() == TEGRA_PLATFORM_SILICON) ? true : false);
 }
 
-uint8_t tegra_platform_is_qt(void)
+bool tegra_platform_is_qt(void)
 {
-	return (tegra_get_platform() == TEGRA_PLATFORM_QT);
+	return ((tegra_get_platform() == TEGRA_PLATFORM_QT) ? true : false);
 }
 
-uint8_t tegra_platform_is_fpga(void)
+bool tegra_platform_is_linsim(void)
 {
-	return (tegra_get_platform() == TEGRA_PLATFORM_FPGA);
+	tegra_platform_t plat = tegra_get_platform();
+
+	return (((plat == TEGRA_PLATFORM_LINSIM) ||
+	       (plat == TEGRA_PLATFORM_UNIT_FPGA)) ? true : false);
 }
 
-uint8_t tegra_platform_is_emulation(void)
+bool tegra_platform_is_fpga(void)
+{
+	return ((tegra_get_platform() == TEGRA_PLATFORM_FPGA) ? true : false);
+}
+
+bool tegra_platform_is_emulation(void)
 {
 	return (tegra_get_platform() == TEGRA_PLATFORM_EMULATION);
 }
+
+bool tegra_platform_is_unit_fpga(void)
+{
+	return ((tegra_get_platform() == TEGRA_PLATFORM_UNIT_FPGA) ? true : false);
+}
+
+bool tegra_platform_is_virt_dev_kit(void)
+{
+	return ((tegra_get_platform() == TEGRA_PLATFORM_VIRT_DEV_KIT) ? true : false);
+}
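
For review convenience, here is a minimal standalone sketch of the field decode that the reworked tegra_get_platform() above relies on. The shift/mask values mirror the macros moved into tegra_platform.h later in this patch; the sample register value is made up for illustration and is not read from hardware.

#include <stdint.h>
#include <stdio.h>

/* same field positions as the macros added to tegra_platform.h below */
#define MAJOR_VERSION_SHIFT	0x4U
#define MAJOR_VERSION_MASK	0xFU
#define MINOR_VERSION_SHIFT	0x10U
#define MINOR_VERSION_MASK	0xFU
#define CHIP_ID_SHIFT		8U
#define CHIP_ID_MASK		0xFFU
#define PRE_SI_PLATFORM_SHIFT	0x14U
#define PRE_SI_PLATFORM_MASK	0xFU

int main(void)
{
	/* hypothetical chip ID sample: Tegra186, major 0, minor 1 (FPGA) */
	uint32_t chipid = (0x18U << CHIP_ID_SHIFT) | (1U << MINOR_VERSION_SHIFT);

	uint32_t chip   = (chipid >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
	uint32_t major  = (chipid >> MAJOR_VERSION_SHIFT) & MAJOR_VERSION_MASK;
	uint32_t minor  = (chipid >> MINOR_VERSION_SHIFT) & MINOR_VERSION_MASK;
	uint32_t pre_si = (chipid >> PRE_SI_PLATFORM_SHIFT) & PRE_SI_PLATFORM_MASK;

	/* major == 0 and minor == TEGRA_MINOR_FPGA selects TEGRA_PLATFORM_FPGA */
	printf("chip=0x%x major=%u minor=%u pre_si_platform=%u\n",
	       chip, major, minor, pre_si);
	return 0;
}
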
diff --git a/plat/nvidia/tegra/common/tegra_pm.c b/plat/nvidia/tegra/common/tegra_pm.c
index 8361ddd..ce44983 100644
--- a/plat/nvidia/tegra/common/tegra_pm.c
+++ b/plat/nvidia/tegra/common/tegra_pm.c
@@ -21,6 +21,7 @@
 #include <memctrl.h>
 #include <pmc.h>
 #include <tegra_def.h>
+#include <tegra_platform.h>
 #include <tegra_private.h>
 
 extern uint64_t tegra_bl31_phys_base;
@@ -222,6 +223,7 @@
 void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state)
 {
 	plat_params_from_bl2_t *plat_params;
+	uint32_t console_clock;
 
 	/*
 	 * Initialize the GIC cpu and distributor interfaces
@@ -234,10 +236,19 @@
 	if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
 			PSTATE_ID_SOC_POWERDN) {
 
+		/*
+		 * Reference clock used by the FPGAs is a lot slower.
+		 */
+		if (tegra_platform_is_fpga() == 1U) {
+			console_clock = TEGRA_BOOT_UART_CLK_13_MHZ;
+		} else {
+			console_clock = TEGRA_BOOT_UART_CLK_408_MHZ;
+		}
+
 		/* Initialize the runtime console */
 		if (tegra_console_base != (uint64_t)0) {
-			console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ,
-				TEGRA_CONSOLE_BAUDRATE);
+			console_init(tegra_console_base, console_clock,
+				     TEGRA_CONSOLE_BAUDRATE);
 		}
 
 		/*
diff --git a/plat/nvidia/tegra/common/tegra_sip_calls.c b/plat/nvidia/tegra/common/tegra_sip_calls.c
index e50d12f..e7acece 100644
--- a/plat/nvidia/tegra/common/tegra_sip_calls.c
+++ b/plat/nvidia/tegra/common/tegra_sip_calls.c
@@ -31,20 +31,29 @@
  ******************************************************************************/
 extern uint8_t tegra_fake_system_suspend;
 
-
 /*******************************************************************************
  * SoC specific SiP handler
  ******************************************************************************/
 #pragma weak plat_sip_handler
-int plat_sip_handler(uint32_t smc_fid,
+int32_t plat_sip_handler(uint32_t smc_fid,
 		     uint64_t x1,
 		     uint64_t x2,
 		     uint64_t x3,
 		     uint64_t x4,
-		     void *cookie,
+		     const void *cookie,
 		     void *handle,
 		     uint64_t flags)
 {
+	/* unused parameters */
+	(void)smc_fid;
+	(void)x1;
+	(void)x2;
+	(void)x3;
+	(void)x4;
+	(void)cookie;
+	(void)handle;
+	(void)flags;
+
 	return -ENOTSUP;
 }
 
@@ -61,112 +70,115 @@
 			    u_register_t flags)
 {
 	uint32_t regval;
-	int err;
+	int32_t err;
 
 	/* Check if this is a SoC specific SiP */
 	err = plat_sip_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
-	if (err == 0)
-		SMC_RET1(handle, (uint64_t)err);
+	if (err == 0) {
 
-	switch (smc_fid) {
+		SMC_RET1(handle, (uint64_t)err);
 
-	case TEGRA_SIP_NEW_VIDEOMEM_REGION:
+	} else {
 
-		/* clean up the high bits */
-		x2 = (uint32_t)x2;
+		switch (smc_fid) {
 
-		/*
-		 * Check if Video Memory overlaps TZDRAM (contains bl31/bl32)
-		 * or falls outside of the valid DRAM range
-		 */
-		err = bl31_check_ns_address(x1, x2);
-		if (err)
-			SMC_RET1(handle, err);
+		case TEGRA_SIP_NEW_VIDEOMEM_REGION:
 
-		/*
-		 * Check if Video Memory is aligned to 1MB.
-		 */
-		if ((x1 & 0xFFFFF) || (x2 & 0xFFFFF)) {
-			ERROR("Unaligned Video Memory base address!\n");
-			SMC_RET1(handle, -ENOTSUP);
-		}
+			/* clean up the high bits */
+			x2 = (uint32_t)x2;
 
-		/*
-		 * The GPU is the user of the Video Memory region. In order to
-		 * transition to the new memory region smoothly, we program the
-		 * new base/size ONLY if the GPU is in reset mode.
-		 */
-		regval = mmio_read_32(TEGRA_CAR_RESET_BASE +
-				      TEGRA_GPU_RESET_REG_OFFSET);
-		if ((regval & GPU_RESET_BIT) == 0U) {
-			ERROR("GPU not in reset! Video Memory setup failed\n");
-			SMC_RET1(handle, -ENOTSUP);
-		}
+			/*
+			 * Check if Video Memory overlaps TZDRAM (contains bl31/bl32)
+			 * or falls outside of the valid DRAM range
+			 */
+			err = bl31_check_ns_address(x1, x2);
+			if (err != 0) {
+				SMC_RET1(handle, (uint64_t)err);
+			}
 
-		/* new video memory carveout settings */
-		tegra_memctrl_videomem_setup(x1, x2);
+			/*
+			 * Check if Video Memory is aligned to 1MB.
+			 */
+			if (((x1 & 0xFFFFFU) != 0U) || ((x2 & 0xFFFFFU) != 0U)) {
+				ERROR("Unaligned Video Memory base address!\n");
+				SMC_RET1(handle, -ENOTSUP);
+			}
 
-		SMC_RET1(handle, 0);
-		break;
+			/*
+			 * The GPU is the user of the Video Memory region. In order to
+			 * transition to the new memory region smoothly, we program the
+			 * new base/size ONLY if the GPU is in reset mode.
+			 */
+			regval = mmio_read_32(TEGRA_CAR_RESET_BASE +
+					      TEGRA_GPU_RESET_REG_OFFSET);
+			if ((regval & GPU_RESET_BIT) == 0UL) {
+				ERROR("GPU not in reset! Video Memory setup failed\n");
+				SMC_RET1(handle, -ENOTSUP);
+			}
 
-	/*
-	 * The NS world registers the address of its handler to be
-	 * used for processing the FIQ. This is normally used by the
-	 * NS FIQ debugger driver to detect system hangs by programming
-	 * a watchdog timer to fire a FIQ interrupt.
-	 */
-	case TEGRA_SIP_FIQ_NS_ENTRYPOINT:
+			/* new video memory carveout settings */
+			tegra_memctrl_videomem_setup(x1, (uint32_t)x2);
 
-		if (!x1)
-			SMC_RET1(handle, SMC_UNK);
+			SMC_RET1(handle, 0);
 
 		/*
-		 * TODO: Check if x1 contains a valid DRAM address
+		 * The NS world registers the address of its handler to be
+		 * used for processing the FIQ. This is normally used by the
+		 * NS FIQ debugger driver to detect system hangs by programming
+		 * a watchdog timer to fire a FIQ interrupt.
 		 */
+		case TEGRA_SIP_FIQ_NS_ENTRYPOINT:
 
-		/* store the NS world's entrypoint */
-		tegra_fiq_set_ns_entrypoint(x1);
+			if (x1 == 0U) {
+				SMC_RET1(handle, SMC_UNK);
+			}
 
-		SMC_RET1(handle, 0);
-		break;
+			/*
+			 * TODO: Check if x1 contains a valid DRAM address
+			 */
 
-	/*
-	 * The NS world's FIQ handler issues this SMC to get the NS EL1/EL0
-	 * CPU context when the FIQ interrupt was triggered. This allows the
-	 * NS world to understand the CPU state when the watchdog interrupt
-	 * triggered.
-	 */
-	case TEGRA_SIP_FIQ_NS_GET_CONTEXT:
+			/* store the NS world's entrypoint */
+			tegra_fiq_set_ns_entrypoint(x1);
 
-		/* retrieve context registers when FIQ triggered */
-		tegra_fiq_get_intr_context();
-
-		SMC_RET0(handle);
-		break;
+			SMC_RET1(handle, 0);
 
-	case TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND:
 		/*
-		 * System suspend fake mode is set if we are on VDK and we make
-		 * a debug SIP call. This mode ensures that we excercise debug
-		 * path instead of the regular code path to suit the pre-silicon
-		 * platform needs. These include replacing the call to WFI by
-		 * a warm reset request.
+		 * The NS world's FIQ handler issues this SMC to get the NS EL1/EL0
+		 * CPU context when the FIQ interrupt was triggered. This allows the
+		 * NS world to understand the CPU state when the watchdog interrupt
+		 * triggered.
 		 */
-		if (tegra_platform_is_emulation() != 0U) {
+		case TEGRA_SIP_FIQ_NS_GET_CONTEXT:
 
-			tegra_fake_system_suspend = 1;
-			SMC_RET1(handle, 0);
-		}
+			/* retrieve context registers when FIQ triggered */
+			(void)tegra_fiq_get_intr_context();
 
-		/*
-		 * We return to the external world as if this SIP is not
-		 * implemented in case, we are not running on VDK.
-		 */
-		break;
+			SMC_RET0(handle);
+
+		case TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND:
+			/*
+			 * System suspend fake mode is set if we are on VDK and we make
+			 * a debug SIP call. This mode ensures that we exercise the debug
+			 * path instead of the regular code path to suit the pre-silicon
+			 * platform needs. These include replacing the call to WFI by
+			 * a warm reset request.
+			 */
+			if (tegra_platform_is_virt_dev_kit() != false) {
 
-	default:
-		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
-		break;
+				tegra_fake_system_suspend = 1;
+				SMC_RET1(handle, 0);
+			}
+
+			/*
+			 * We return to the external world as if this SIP is not
+			 * implemented, in case we are not running on VDK.
+			 */
+			break;
+
+		default:
+			ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+			break;
+		}
 	}
 
 	SMC_RET1(handle, SMC_UNK);
@@ -176,9 +188,9 @@
 DECLARE_RT_SVC(
 	tegra_sip_fast,
 
-	OEN_SIP_START,
-	OEN_SIP_END,
-	SMC_TYPE_FAST,
-	NULL,
-	tegra_sip_handler
+	(OEN_SIP_START),
+	(OEN_SIP_END),
+	(SMC_TYPE_FAST),
+	(NULL),
+	(tegra_sip_handler)
 );
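
As a quick sanity check of the alignment test used by TEGRA_SIP_NEW_VIDEOMEM_REGION above: masking with 0xFFFFF rejects any base or size that is not a multiple of 1 MB. A tiny standalone illustration, using made-up addresses:

#include <stdint.h>
#include <stdio.h>

/* same 1 MB alignment test as the videomem handler above */
static int is_1mb_aligned(uint64_t val)
{
	return (val & 0xFFFFFU) == 0U;
}

int main(void)
{
	printf("%d\n", is_1mb_aligned(0x80000000U));	/* 1: 1 MB aligned */
	printf("%d\n", is_1mb_aligned(0x80080000U));	/* 0: 512 KB offset */
	return 0;
}
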
diff --git a/plat/nvidia/tegra/common/tegra_topology.c b/plat/nvidia/tegra/common/tegra_topology.c
index 893f28f..4f6cf93 100644
--- a/plat/nvidia/tegra/common/tegra_topology.c
+++ b/plat/nvidia/tegra/common/tegra_topology.c
@@ -7,41 +7,38 @@
 #include <platform_def.h>
 
 #include <arch.h>
+#include <platform.h>
 #include <lib/psci/psci.h>
 
-extern const unsigned char tegra_power_domain_tree_desc[];
 #pragma weak plat_core_pos_by_mpidr
 
 /*******************************************************************************
- * This function returns the Tegra default topology tree information.
- ******************************************************************************/
-const unsigned char *plat_get_power_domain_tree_desc(void)
-{
-	return tegra_power_domain_tree_desc;
-}
-
-/*******************************************************************************
  * This function implements a part of the critical interface between the psci
  * generic layer and the platform that allows the former to query the platform
  * to convert an MPIDR to a unique linear index. An error code (-1) is returned
  * in case the MPIDR is invalid.
  ******************************************************************************/
-int plat_core_pos_by_mpidr(u_register_t mpidr)
+int32_t plat_core_pos_by_mpidr(u_register_t mpidr)
 {
-	unsigned int cluster_id, cpu_id;
+	u_register_t cluster_id, cpu_id;
+	int32_t result;
 
-	cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
-	cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+	cluster_id = (mpidr >> (u_register_t)MPIDR_AFF1_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK;
+	cpu_id = (mpidr >> (u_register_t)MPIDR_AFF0_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK;
 
-	if (cluster_id >= PLATFORM_CLUSTER_COUNT)
-		return PSCI_E_NOT_PRESENT;
+	result = (int32_t)cpu_id + ((int32_t)cluster_id * 4);
+
+	if (cluster_id >= (u_register_t)PLATFORM_CLUSTER_COUNT) {
+		result = PSCI_E_NOT_PRESENT;
+	}
 
 	/*
 	 * Validate cpu_id by checking whether it represents a CPU in
 	 * one of the two clusters present on the platform.
 	 */
-	if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)
-		return PSCI_E_NOT_PRESENT;
+	if (cpu_id >= (u_register_t)PLATFORM_MAX_CPUS_PER_CLUSTER) {
+		result = PSCI_E_NOT_PRESENT;
+	}
 
-	return (cpu_id + (cluster_id * 4));
+	return result;
 }
diff --git a/plat/nvidia/tegra/include/drivers/bpmp.h b/plat/nvidia/tegra/include/drivers/bpmp.h
new file mode 100644
index 0000000..27f57df
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/bpmp.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef BPMP_H
+#define BPMP_H
+
+#include <stdint.h>
+
+/* macro to enable clock to the Atomics block */
+#define CAR_ENABLE_ATOMICS	(1UL << 16)
+
+/* command to get the channel base addresses from bpmp */
+#define ATOMIC_CMD_GET		4UL
+
+/* Hardware IRQ # used to signal bpmp of an incoming command */
+#define INT_SHR_SEM_OUTBOX_FULL	6UL
+
+/* macros to decode the bpmp's state */
+#define CH_MASK(ch)		(0x3UL << ((ch) * 2UL))
+#define MA_FREE(ch)		(0x2UL << ((ch) * 2UL))
+#define MA_ACKD(ch)		(0x3UL << ((ch) * 2UL))
+
+/* response from bpmp to indicate it has powered up */
+#define SIGN_OF_LIFE		0xAAAAAAAAUL
+
+/* flags to indicate bpmp driver's state */
+#define BPMP_INIT_COMPLETE	0xBEEFF00DUL
+#define BPMP_INIT_PENDING	0xDEADBEEFUL
+
+/* requests serviced by the bpmp */
+#define MRQ_PING		0
+#define MRQ_QUERY_TAG		1
+#define MRQ_DO_IDLE		2
+#define MRQ_TOLERATE_IDLE	3
+#define MRQ_MODULE_LOAD		4
+#define MRQ_MODULE_UNLOAD	5
+#define MRQ_SWITCH_CLUSTER	6
+#define MRQ_TRACE_MODIFY	7
+#define MRQ_WRITE_TRACE		8
+#define MRQ_THREADED_PING	9
+#define MRQ_CPUIDLE_USAGE	10
+#define MRQ_MODULE_MAIL		11
+#define MRQ_SCX_ENABLE		12
+#define MRQ_BPMPIDLE_USAGE	14
+#define MRQ_HEAP_USAGE		15
+#define MRQ_SCLK_SKIP_SET_RATE	16
+#define MRQ_ENABLE_SUSPEND	17
+#define MRQ_PASR_MASK		18
+#define MRQ_DEBUGFS		19
+#define MRQ_THERMAL		27
+
+/* Tegra PM states as known to BPMP */
+#define TEGRA_PM_CC1		9
+#define TEGRA_PM_CC4		12
+#define TEGRA_PM_CC6		14
+#define TEGRA_PM_CC7		15
+#define TEGRA_PM_SC1		17
+#define TEGRA_PM_SC2		18
+#define TEGRA_PM_SC3		19
+#define TEGRA_PM_SC4		20
+#define TEGRA_PM_SC7		23
+
+/* flag to indicate if entry into a CCx power state is allowed */
+#define BPMP_CCx_ALLOWED	0UL
+
+/* number of communication channels to interact with the bpmp */
+#define NR_CHANNELS		4U
+
+/* flag to ask bpmp to acknowledge command packet */
+#define NO_ACK			(0UL << 0UL)
+#define DO_ACK			(1UL << 0UL)
+
+/* size of the command/response data */
+#define MSG_DATA_MAX_SZ		120U
+
+/**
+ * command/response packet to/from the bpmp
+ *
+ * command
+ * -------
+ * code: MRQ_* command
+ * flags: DO_ACK or NO_ACK
+ * data:
+ * 	[0] = cpu #
+ * 	[1] = cluster power state (TEGRA_PM_CCx)
+ * 	[2] = system power state (TEGRA_PM_SCx)
+ *
+ * response
+ * ---------
+ * code: error code
+ * flags: not used
+ * data:
+ * 	[0-3] = response value
+ */
+typedef struct mb_data {
+	int32_t code;
+	uint32_t flags;
+	uint8_t data[MSG_DATA_MAX_SZ];
+} mb_data_t;
+
+/**
+ * Function to initialise the interface with the bpmp
+ */
+int tegra_bpmp_init(void);
+
+/**
+ * Handler to send an MRQ_* command to the bpmp
+ */
+int32_t tegra_bpmp_send_receive_atomic(int mrq, const void *ob_data, int ob_sz,
+		void *ib_data, int ib_sz);
+
+#endif /* BPMP_H */
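
A hedged call-site sketch for the new interface: the outbound payload follows the cpu/cluster/system layout documented in the packet comment above, but the choice of MRQ_DO_IDLE and the helper itself are illustrative assumptions, not part of this patch.

#include <stdint.h>

#include <bpmp.h>

/* hypothetical helper: tell the BPMP which idle state this CPU wants to enter */
static int32_t bpmp_request_idle(uint32_t cpu)
{
	uint32_t ob_data[3];
	int32_t ib_data = 0;

	ob_data[0] = cpu;		/* cpu # */
	ob_data[1] = TEGRA_PM_CC6;	/* cluster power state */
	ob_data[2] = TEGRA_PM_SC1;	/* system power state */

	return tegra_bpmp_send_receive_atomic(MRQ_DO_IDLE,
					      ob_data, (int)sizeof(ob_data),
					      &ib_data, (int)sizeof(ib_data));
}
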
diff --git a/plat/nvidia/tegra/include/drivers/memctrl_v2.h b/plat/nvidia/tegra/include/drivers/memctrl_v2.h
index 957ff54..ffe5269 100644
--- a/plat/nvidia/tegra/include/drivers/memctrl_v2.h
+++ b/plat/nvidia/tegra/include/drivers/memctrl_v2.h
@@ -100,6 +100,19 @@
  ******************************************************************************/
 #define MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(addr) (addr + sizeof(uint32_t))
 
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_SO_DEV		(0UL << 4)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_SO_DEV	(1UL << 4)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SO_DEV		(2UL << 4)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_SO_DEV	(3UL << 4)
+
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_NORMAL		(0UL << 8)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_NORMAL	(1UL << 8)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_NORMAL		(2UL << 8)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_NORMAL	(3UL << 8)
+
+#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_ZERO				(0UL << 12)
+#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_CLIENT_AXI_ID		(1UL << 12)
+
 /*******************************************************************************
  * Memory Controller transaction override config registers
  ******************************************************************************/
@@ -312,98 +325,51 @@
 /*******************************************************************************
  * Memory Controller's PCFIFO client configuration registers
  ******************************************************************************/
-#define MC_PCFIFO_CLIENT_CONFIG1			0xdd4
-#define  MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL		0x20000
-#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_UNORDERED	(0 << 17)
-#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_MASK	(1 << 17)
-#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_UNORDERED	(0 << 21)
-#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_MASK	(1 << 21)
-#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_UNORDERED (0 << 29)
-#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_MASK	(1 << 29)
-
-#define MC_PCFIFO_CLIENT_CONFIG2			0xdd8
-#define  MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL		0x20000
-#define  MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_UNORDERED	(0 << 11)
-#define  MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_MASK	(1 << 11)
-#define  MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_UNORDERED	(0 << 13)
-#define  MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_MASK	(1 << 13)
-
-#define MC_PCFIFO_CLIENT_CONFIG3			0xddc
-#define  MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL		0
-#define  MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_UNORDERED	(0 << 7)
-#define  MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_MASK	(1 << 7)
-
-#define MC_PCFIFO_CLIENT_CONFIG4		0xde0
-#define  MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL	0
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_UNORDERED (0 << 1)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_MASK	(1 << 1)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_UNORDERED	(0 << 5)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_MASK	(1 << 5)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_UNORDERED (0 << 13)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_MASK	(1 << 13)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_UNORDERED (0 << 15)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_MASK	(1 << 15)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_UNORDERED	(0 << 17)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_MASK	(1 << 17)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_UNORDERED	(0 << 22)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_MASK	(1 << 22)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_UNORDERED	(0 << 26)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_MASK	(1 << 26)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_UNORDERED	(0 << 30)
-#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_MASK	(1 << 30)
-
-#define MC_PCFIFO_CLIENT_CONFIG5		0xbf4
-#define  MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL	0
-#define  MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_UNORDERED	(0 << 0)
-#define  MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_MASK	(1 << 0)
-
-/*******************************************************************************
- * Memory Controller's SMMU client configuration registers
- ******************************************************************************/
-#define MC_SMMU_CLIENT_CONFIG1				0x44
-#define  MC_SMMU_CLIENT_CONFIG1_RESET_VAL		0x20000
-#define  MC_SMMU_CLIENT_CONFIG1_AFIW_UNORDERED		(0 << 17)
-#define  MC_SMMU_CLIENT_CONFIG1_AFIW_MASK		(1 << 17)
-#define  MC_SMMU_CLIENT_CONFIG1_HDAW_UNORDERED		(0 << 21)
-#define  MC_SMMU_CLIENT_CONFIG1_HDAW_MASK		(1 << 21)
-#define  MC_SMMU_CLIENT_CONFIG1_SATAW_UNORDERED		(0 << 29)
-#define  MC_SMMU_CLIENT_CONFIG1_SATAW_MASK		(1 << 29)
+#define MC_PCFIFO_CLIENT_CONFIG1				0xdd4UL
+#define  MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL			0x20000UL
+#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_UNORDERED		(0UL << 17)
+#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_MASK		(1UL << 17)
+#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_UNORDERED		(0UL << 21)
+#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_MASK		(1UL << 21)
+#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_UNORDERED	(0UL << 29)
+#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_MASK		(1UL << 29)
 
-#define MC_SMMU_CLIENT_CONFIG2				0x48
-#define  MC_SMMU_CLIENT_CONFIG2_RESET_VAL		0x20000
-#define  MC_SMMU_CLIENT_CONFIG2_XUSB_HOSTW_UNORDERED	(0 << 11)
-#define  MC_SMMU_CLIENT_CONFIG2_XUSB_HOSTW_MASK		(1 << 11)
-#define  MC_SMMU_CLIENT_CONFIG2_XUSB_DEVW_UNORDERED	(0 << 13)
-#define  MC_SMMU_CLIENT_CONFIG2_XUSB_DEVW_MASK		(1 << 13)
+#define MC_PCFIFO_CLIENT_CONFIG2				0xdd8UL
+#define  MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL			0x20000UL
+#define  MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_UNORDERED	(0UL << 11)
+#define  MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_MASK	(1UL << 11)
+#define  MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_UNORDERED	(0UL << 13)
+#define  MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_MASK		(1UL << 13)
 
-#define MC_SMMU_CLIENT_CONFIG3				0x4c
-#define  MC_SMMU_CLIENT_CONFIG3_RESET_VAL		0
-#define  MC_SMMU_CLIENT_CONFIG3_SDMMCWAB_UNORDERED	(0 << 7)
-#define  MC_SMMU_CLIENT_CONFIG3_SDMMCWAB_MASK		(1 << 7)
+#define MC_PCFIFO_CLIENT_CONFIG3				0xddcUL
+#define  MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL			0UL
+#define  MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_UNORDERED	(0UL << 7)
+#define  MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_MASK		(1UL << 7)
 
-#define MC_SMMU_CLIENT_CONFIG4				0xb9c
-#define  MC_SMMU_CLIENT_CONFIG4_RESET_VAL		0
-#define  MC_SMMU_CLIENT_CONFIG4_SESWR_UNORDERED		(0 << 1)
-#define  MC_SMMU_CLIENT_CONFIG4_SESWR_MASK		(1 << 1)
-#define  MC_SMMU_CLIENT_CONFIG4_ETRW_UNORDERED		(0 << 5)
-#define  MC_SMMU_CLIENT_CONFIG4_ETRW_MASK		(1 << 5)
-#define  MC_SMMU_CLIENT_CONFIG4_AXISW_UNORDERED		(0 << 13)
-#define  MC_SMMU_CLIENT_CONFIG4_AXISW_MASK		(1 << 13)
-#define  MC_SMMU_CLIENT_CONFIG4_EQOSW_UNORDERED		(0 << 15)
-#define  MC_SMMU_CLIENT_CONFIG4_EQOSW_MASK		(1 << 15)
-#define  MC_SMMU_CLIENT_CONFIG4_UFSHCW_UNORDERED	(0 << 17)
-#define  MC_SMMU_CLIENT_CONFIG4_UFSHCW_MASK		(1 << 17)
-#define  MC_SMMU_CLIENT_CONFIG4_BPMPDMAW_UNORDERED	(0 << 22)
-#define  MC_SMMU_CLIENT_CONFIG4_BPMPDMAW_MASK		(1 << 22)
-#define  MC_SMMU_CLIENT_CONFIG4_AONDMAW_UNORDERED	(0 << 26)
-#define  MC_SMMU_CLIENT_CONFIG4_AONDMAW_MASK		(1 << 26)
-#define  MC_SMMU_CLIENT_CONFIG4_SCEDMAW_UNORDERED	(0 << 30)
-#define  MC_SMMU_CLIENT_CONFIG4_SCEDMAW_MASK		(1 << 30)
+#define MC_PCFIFO_CLIENT_CONFIG4				0xde0UL
+#define  MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL			0UL
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_UNORDERED	(0UL << 1)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_MASK		(1UL << 1)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_UNORDERED		(0UL << 5)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_MASK		(1UL << 5)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_UNORDERED	(0UL << 13)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_MASK		(1UL << 13)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_UNORDERED	(0UL << 15)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_ORDERED		(1UL << 15)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_MASK		(1UL << 15)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_UNORDERED	(0UL << 17)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_MASK		(1UL << 17)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_UNORDERED	(0UL << 22)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_MASK		(1UL << 22)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_UNORDERED	(0UL << 26)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_MASK		(1UL << 26)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_UNORDERED	(0UL << 30)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_MASK		(1UL << 30)
 
-#define MC_SMMU_CLIENT_CONFIG5				0xbac
-#define  MC_SMMU_CLIENT_CONFIG5_RESET_VAL		0
-#define  MC_SMMU_CLIENT_CONFIG5_APEDMAW_UNORDERED	(0 << 0)
-#define  MC_SMMU_CLIENT_CONFIG5_APEDMAW_MASK	(1 << 0)
+#define MC_PCFIFO_CLIENT_CONFIG5				0xbf4UL
+#define  MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL			0UL
+#define  MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_UNORDERED	(0UL << 0)
+#define  MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_MASK		(1UL << 0)
 
 #ifndef __ASSEMBLY__
 
@@ -433,9 +399,8 @@
 	(~MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_MASK | \
 	 MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_UNORDERED)
 
-#define mc_set_smmu_unordered_boot_so_mss(id, client) \
-	(~MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_MASK | \
-	 MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_UNORDERED)
+#define mc_set_pcfifo_ordered_boot_so_mss(id, client) \
+	 MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_ORDERED
 
 #define mc_set_tsa_passthrough(client) \
 	{ \
@@ -445,25 +410,13 @@
 			TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \
 	}
 
-#define mc_set_forced_coherent_cfg(client) \
-	{ \
-		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
-			MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV); \
-	}
-
-#define mc_set_forced_coherent_so_dev_cfg(client) \
-	{ \
-		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
-			MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV | \
-			MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT); \
-	}
-
-#define mc_set_forced_coherent_axid_so_dev_cfg(client) \
+#define mc_set_txn_override(client, normal_axi_id, so_dev_axi_id, normal_override, so_dev_override) \
 	{ \
 		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
-			MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV | \
-			MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID | \
-			MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT); \
+				  MC_TXN_OVERRIDE_##normal_axi_id | \
+				  MC_TXN_OVERRIDE_CONFIG_COH_PATH_##so_dev_override##_SO_DEV | \
+				  MC_TXN_OVERRIDE_CONFIG_COH_PATH_##normal_override##_NORMAL | \
+				  MC_TXN_OVERRIDE_CONFIG_CGID_##so_dev_axi_id); \
 	}
 
 /*******************************************************************************
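
For reviewers, the first call converted earlier in this patch, mc_set_txn_override(TSECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT), token-pastes roughly into the statement below; MC_TXN_OVERRIDE_CGID_TAG_DEFAULT is assumed to be defined elsewhere in this header.

tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_TSECSRD,
		  MC_TXN_OVERRIDE_CGID_TAG_DEFAULT |
		  MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_SO_DEV |
		  MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_NORMAL |
		  MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_ZERO);
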
diff --git a/plat/nvidia/tegra/include/drivers/security_engine.h b/plat/nvidia/tegra/include/drivers/security_engine.h
new file mode 100644
index 0000000..abfb217
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/security_engine.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SECURITY_ENGINE_H
+#define SECURITY_ENGINE_H
+
+/*******************************************************************************
+ * Structure definition
+ ******************************************************************************/
+
+/* Security Engine Linked List */
+struct tegra_se_ll {
+	/* DMA buffer address */
+	uint32_t addr;
+	/* Data length in DMA buffer */
+	uint32_t data_len;
+};
+
+#define SE_LL_MAX_BUFFER_NUM			4
+typedef struct tegra_se_io_lst {
+	volatile uint32_t last_buff_num;
+	volatile struct tegra_se_ll buffer[SE_LL_MAX_BUFFER_NUM];
+} tegra_se_io_lst_t __attribute__((aligned(4)));
+
+/* SE device structure */
+typedef struct tegra_se_dev {
+	/* Security Engine ID */
+	const int se_num;
+	/* SE base address */
+	const uint64_t se_base;
+	/* SE context size in AES blocks */
+	const uint32_t ctx_size_blks;
+	/* pointer to source linked list buffer */
+	tegra_se_io_lst_t *src_ll_buf;
+	/* pointer to destination linked list buffer */
+	tegra_se_io_lst_t *dst_ll_buf;
+} tegra_se_dev_t;
+
+/*******************************************************************************
+ * Public interface
+ ******************************************************************************/
+void tegra_se_init(void);
+int tegra_se_suspend(void);
+void tegra_se_resume(void);
+int tegra_se_save_tzram(void);
+
+#endif /* SECURITY_ENGINE_H */
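
A hedged sketch of how a platform's deep-sleep path might consume this new interface; the function names and the ordering around SC7 entry/exit are illustrative assumptions, and only the tegra_se_* prototypes come from the header above.

#include <security_engine.h>

/* hypothetical SC7 hooks */
static int platform_prepare_sc7(void)
{
	/* save SE context before the rails go down; abort SC7 on failure */
	if (tegra_se_suspend() != 0) {
		return -1;
	}

	return tegra_se_save_tzram();
}

static void platform_exit_sc7(void)
{
	/* restore SE state after warmboot */
	tegra_se_resume();
}
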
diff --git a/plat/nvidia/tegra/include/platform_def.h b/plat/nvidia/tegra/include/platform_def.h
index d10dc26..0a0126b 100644
--- a/plat/nvidia/tegra/include/platform_def.h
+++ b/plat/nvidia/tegra/include/platform_def.h
@@ -34,7 +34,8 @@
  * Platform console related constants
  ******************************************************************************/
 #define TEGRA_CONSOLE_BAUDRATE		U(115200)
-#define TEGRA_BOOT_UART_CLK_IN_HZ	U(408000000)
+#define TEGRA_BOOT_UART_CLK_13_MHZ	U(13000000)
+#define TEGRA_BOOT_UART_CLK_408_MHZ	U(408000000)
 
 /*******************************************************************************
  * Platform memory map related constants
diff --git a/plat/nvidia/tegra/include/t210/tegra_def.h b/plat/nvidia/tegra/include/t210/tegra_def.h
index 14cdfd5..8d71cae 100644
--- a/plat/nvidia/tegra/include/t210/tegra_def.h
+++ b/plat/nvidia/tegra/include/t210/tegra_def.h
@@ -33,6 +33,11 @@
 #define PLAT_MAX_OFF_STATE		(PSTATE_ID_SOC_POWERDN + U(1))
 
 /*******************************************************************************
+ * iRAM memory constants
+ ******************************************************************************/
+#define TEGRA_IRAM_BASE			0x40000000
+
+/*******************************************************************************
  * GIC memory map
  ******************************************************************************/
 #define TEGRA_GICD_BASE			U(0x50041000)
@@ -56,6 +61,20 @@
 					 ENABLE_WRAP_INCR_MASTER0_BIT)
 
 /*******************************************************************************
+ * Tegra Resource Semaphore constants
+ ******************************************************************************/
+#define TEGRA_RES_SEMA_BASE		0x60001000UL
+#define  STA_OFFSET			0UL
+#define  SET_OFFSET			4UL
+#define  CLR_OFFSET			8UL
+
+/*******************************************************************************
+ * Tegra Primary Interrupt Controller constants
+ ******************************************************************************/
+#define TEGRA_PRI_ICTLR_BASE		0x60004000UL
+#define  CPU_IEP_FIR_SET		0x18UL
+
+/*******************************************************************************
  * Tegra micro-seconds timer constants
  ******************************************************************************/
 #define TEGRA_TMRUS_BASE		U(0x60005010)
@@ -67,6 +86,8 @@
 #define TEGRA_CAR_RESET_BASE		U(0x60006000)
 #define TEGRA_GPU_RESET_REG_OFFSET	U(0x28C)
 #define  GPU_RESET_BIT			(U(1) << 24)
+#define TEGRA_RST_DEV_CLR_V		U(0x434)
+#define TEGRA_CLK_ENB_V			U(0x440)
 
 /*******************************************************************************
  * Tegra Flow Controller constants
@@ -74,6 +95,11 @@
 #define TEGRA_FLOWCTRL_BASE		U(0x60007000)
 
 /*******************************************************************************
+ * Tegra AHB arbitration controller
+ ******************************************************************************/
+#define TEGRA_AHB_ARB_BASE		0x6000C000UL
+
+/*******************************************************************************
  * Tegra Secure Boot Controller constants
  ******************************************************************************/
 #define TEGRA_SB_BASE			U(0x6000C200)
@@ -104,6 +130,15 @@
 #define TEGRA_PMC_BASE			U(0x7000E400)
 
 /*******************************************************************************
+ * Tegra Atomics constants
+ ******************************************************************************/
+#define TEGRA_ATOMICS_BASE		0x70016000UL
+#define  TRIGGER0_REG_OFFSET		0UL
+#define  TRIGGER_WIDTH_SHIFT		4UL
+#define  TRIGGER_ID_SHIFT		16UL
+#define  RESULT0_REG_OFFSET		0xC00UL
+
+/*******************************************************************************
  * Tegra Memory Controller constants
  ******************************************************************************/
 #define TEGRA_MC_BASE			U(0x70019000)
@@ -119,6 +154,15 @@
 #define MC_VIDEO_PROTECT_SIZE_MB	U(0x64c)
 
 /*******************************************************************************
+ * Tegra SE constants
+ ******************************************************************************/
+#define TEGRA_SE1_BASE			U(0x70012000)
+#define TEGRA_SE2_BASE			U(0x70412000)
+#define TEGRA_PKA1_BASE			U(0x70420000)
+#define TEGRA_SE2_RANGE_SIZE		U(0x2000)
+#define SE_TZRAM_SECURITY		U(0x4)
+
+/*******************************************************************************
  * Tegra TZRAM constants
  ******************************************************************************/
 #define TEGRA_TZRAM_BASE		U(0x7C010000)
diff --git a/plat/nvidia/tegra/include/tegra_platform.h b/plat/nvidia/tegra/include/tegra_platform.h
index 63a0e01..1e7ba16 100644
--- a/plat/nvidia/tegra/include/tegra_platform.h
+++ b/plat/nvidia/tegra/include/tegra_platform.h
@@ -8,27 +8,56 @@
 #define TEGRA_PLATFORM_H
 
 #include <cdefs.h>
+#include <stdbool.h>
+#include <utils_def.h>
+
+/*******************************************************************************
+ * Tegra major, minor version helper macros
+ ******************************************************************************/
+#define MAJOR_VERSION_SHIFT		U(0x4)
+#define MAJOR_VERSION_MASK		U(0xF)
+#define MINOR_VERSION_SHIFT		U(0x10)
+#define MINOR_VERSION_MASK		U(0xF)
+#define CHIP_ID_SHIFT			U(8)
+#define CHIP_ID_MASK			U(0xFF)
+#define PRE_SI_PLATFORM_SHIFT		U(0x14)
+#define PRE_SI_PLATFORM_MASK		U(0xF)
+
+/*******************************************************************************
+ * Tegra chip ID values
+ ******************************************************************************/
+#define TEGRA_CHIPID_TEGRA13		U(0x13)
+#define TEGRA_CHIPID_TEGRA21		U(0x21)
+#define TEGRA_CHIPID_TEGRA18		U(0x18)
+
+#ifndef __ASSEMBLY__
 
 /*
- * Tegra chip major/minor version
+ * Tegra chip ID major/minor identifiers
  */
 uint32_t tegra_get_chipid_major(void);
 uint32_t tegra_get_chipid_minor(void);
 
 /*
- * Tegra chip identifiers
+ * Tegra chip ID identifiers
  */
-uint8_t tegra_chipid_is_t132(void);
-uint8_t tegra_chipid_is_t210(void);
-uint8_t tegra_chipid_is_t186(void);
+bool tegra_chipid_is_t132(void);
+bool tegra_chipid_is_t186(void);
+bool tegra_chipid_is_t210(void);
+bool tegra_chipid_is_t210_b01(void);
 
 
 /*
  * Tegra platform identifiers
  */
-uint8_t tegra_platform_is_silicon(void);
-uint8_t tegra_platform_is_qt(void);
-uint8_t tegra_platform_is_emulation(void);
-uint8_t tegra_platform_is_fpga(void);
+bool tegra_platform_is_silicon(void);
+bool tegra_platform_is_qt(void);
+bool tegra_platform_is_emulation(void);
+bool tegra_platform_is_linsim(void);
+bool tegra_platform_is_fpga(void);
+bool tegra_platform_is_unit_fpga(void);
+bool tegra_platform_is_virt_dev_kit(void);
+
+#endif /* __ASSEMBLY__ */
 
 #endif /* TEGRA_PLATFORM_H */
diff --git a/plat/nvidia/tegra/include/tegra_private.h b/plat/nvidia/tegra/include/tegra_private.h
index 93223cc..1682927 100644
--- a/plat/nvidia/tegra/include/tegra_private.h
+++ b/plat/nvidia/tegra/include/tegra_private.h
@@ -32,9 +32,16 @@
 	uint64_t tzdram_base;
 	/* UART port ID */
 	int uart_id;
+	/* L2 ECC parity protection disable flag */
+	int l2_ecc_parity_prot_dis;
 } plat_params_from_bl2_t;
 
 /*******************************************************************************
+ * Helper function to access l2ctlr_el1 register on Cortex-A57 CPUs
+ ******************************************************************************/
+DEFINE_RENAME_SYSREG_RW_FUNCS(l2ctlr_el1, CORTEX_A57_L2CTLR_EL1)
+
+/*******************************************************************************
  * Struct describing parameters passed to bl31
  ******************************************************************************/
 struct tegra_bl31_params {
@@ -47,19 +54,19 @@
 };
 
 /* Declarations for plat_psci_handlers.c */
-int32_t tegra_soc_validate_power_state(unsigned int power_state,
+int32_t tegra_soc_validate_power_state(uint32_t power_state,
 		psci_power_state_t *req_state);
 
 /* Declarations for plat_setup.c */
 const mmap_region_t *plat_get_mmio_map(void);
-uint32_t plat_get_console_from_id(int id);
+uint32_t plat_get_console_from_id(int32_t id);
 void plat_gic_setup(void);
 struct tegra_bl31_params *plat_get_bl31_params(void);
 plat_params_from_bl2_t *plat_get_bl31_plat_params(void);
 
 /* Declarations for plat_secondary.c */
 void plat_secondary_setup(void);
-int plat_lock_cpu_vectors(void);
+int32_t plat_lock_cpu_vectors(void);
 
 /* Declarations for tegra_fiq_glue.c */
 void tegra_fiq_handler_setup(void);
@@ -92,4 +99,22 @@
 void tegra_secure_entrypoint(void);
 void tegra186_cpu_reset_handler(void);
 
+/* Declarations for tegra_sip_calls.c */
+uintptr_t tegra_sip_handler(uint32_t smc_fid,
+			    u_register_t x1,
+			    u_register_t x2,
+			    u_register_t x3,
+			    u_register_t x4,
+			    void *cookie,
+			    void *handle,
+			    u_register_t flags);
+int plat_sip_handler(uint32_t smc_fid,
+		     uint64_t x1,
+		     uint64_t x2,
+		     uint64_t x3,
+		     uint64_t x4,
+		     const void *cookie,
+		     void *handle,
+		     uint64_t flags);
+
 #endif /* TEGRA_PRIVATE_H */
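
The DEFINE_RENAME_SYSREG_RW_FUNCS() line added above generates read_l2ctlr_el1()/write_l2ctlr_el1() accessors. A hedged sketch of how a SoC setup file might honour the new l2_ecc_parity_prot_dis flag with them; the helper and the bit position (L2CTLR_EL1 ECC/parity enable, taken from the Cortex-A57 TRM) are assumptions here.

#include <stdint.h>

#include <tegra_private.h>

/* assumed L2CTLR_EL1 "ECC and parity enable" bit position */
#define L2_ECC_AND_PARITY_ENABLE_BIT	(1ULL << 21)

static void plat_enable_l2_ecc_parity(void)
{
	const plat_params_from_bl2_t *params = plat_get_bl31_plat_params();
	uint64_t val;

	/* l2_ecc_parity_prot_dis == 1 asks us to leave the protection off */
	if (params->l2_ecc_parity_prot_dis != 1) {
		val = read_l2ctlr_el1();
		val |= L2_ECC_AND_PARITY_ENABLE_BIT;
		write_l2ctlr_el1(val);
	}
}
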
diff --git a/plat/nvidia/tegra/soc/t132/plat_setup.c b/plat/nvidia/tegra/soc/t132/plat_setup.c
index f72b73e..3f9cda9 100644
--- a/plat/nvidia/tegra/soc/t132/plat_setup.c
+++ b/plat/nvidia/tegra/soc/t132/plat_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,25 +7,10 @@
 #include <arch_helpers.h>
 #include <common/bl_common.h>
 #include <lib/xlat_tables/xlat_tables_v2.h>
-
+#include <platform.h>
 #include <tegra_def.h>
 #include <tegra_private.h>
 
-/*******************************************************************************
- * The Tegra power domain tree has a single system level power domain i.e. a
- * single root node. The first entry in the power domain descriptor specifies
- * the number of power domains at the highest power level.
- *******************************************************************************
- */
-const unsigned char tegra_power_domain_tree_desc[] = {
-	/* No of root nodes */
-	1,
-	/* No of clusters */
-	PLATFORM_CLUSTER_COUNT,
-	/* No of CPU cores */
-	PLATFORM_CORE_COUNT,
-};
-
 /* sets of MMIO ranges setup */
 #define MMIO_RANGE_0_ADDR	0x50000000
 #define MMIO_RANGE_1_ADDR	0x60000000
@@ -54,6 +39,29 @@
 	return tegra_mmap;
 }
 
+/*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ *******************************************************************************
+ */
+const unsigned char tegra_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	1,
+	/* No of clusters */
+	PLATFORM_CLUSTER_COUNT,
+	/* No of CPU cores */
+	PLATFORM_CORE_COUNT,
+};
+
+/*******************************************************************************
+ * This function returns the Tegra default topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return tegra_power_domain_tree_desc;
+}
+
 unsigned int plat_get_syscnt_freq2(void)
 {
 	return 12000000;
diff --git a/plat/nvidia/tegra/soc/t132/plat_sip_calls.c b/plat/nvidia/tegra/soc/t132/plat_sip_calls.c
index 02dd1cd..90c6bb2 100644
--- a/plat/nvidia/tegra/soc/t132/plat_sip_calls.c
+++ b/plat/nvidia/tegra/soc/t132/plat_sip_calls.c
@@ -38,7 +38,7 @@
 		     uint64_t x2,
 		     uint64_t x3,
 		     uint64_t x4,
-		     void *cookie,
+		     const void *cookie,
 		     void *handle,
 		     uint64_t flags)
 {
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
index 1429a61..599e46e 100644
--- a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
@@ -99,9 +99,9 @@
 		ret = 0;
 	} else {
 		/* For shutdown/reboot commands, we dont have to check for timeouts */
-		if ((req == (uint32_t)TEGRA_ARI_MISC_CCPLEX) &&
-		    ((lo == (uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) ||
-		     (lo == (uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT))) {
+		if ((req == TEGRA_ARI_MISC_CCPLEX) &&
+		    ((lo == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) ||
+		     (lo == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT))) {
 				ret = 0;
 		} else {
 			/*
@@ -161,38 +161,38 @@
 	uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
 	uint8_t update_wake_mask)
 {
-	uint32_t val = 0U;
+	uint64_t val = 0U;
 
 	/* clean the previous response state */
 	ari_clobber_response(ari_base);
 
 	/* update CLUSTER_CSTATE? */
 	if (cluster != 0U) {
-		val |= (cluster & (uint32_t)CLUSTER_CSTATE_MASK) |
-			(uint32_t)CLUSTER_CSTATE_UPDATE_BIT;
+		val |= (cluster & CLUSTER_CSTATE_MASK) |
+			CLUSTER_CSTATE_UPDATE_BIT;
 	}
 
 	/* update CCPLEX_CSTATE? */
 	if (ccplex != 0U) {
-		val |= ((ccplex & (uint32_t)CCPLEX_CSTATE_MASK) << (uint32_t)CCPLEX_CSTATE_SHIFT) |
-			(uint32_t)CCPLEX_CSTATE_UPDATE_BIT;
+		val |= ((ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) |
+			CCPLEX_CSTATE_UPDATE_BIT;
 	}
 
 	/* update SYSTEM_CSTATE? */
 	if (system != 0U) {
-		val |= ((system & (uint32_t)SYSTEM_CSTATE_MASK) << (uint32_t)SYSTEM_CSTATE_SHIFT) |
-		       (((uint32_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
-			(uint32_t)SYSTEM_CSTATE_UPDATE_BIT);
+		val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
+		       (((uint64_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
+			SYSTEM_CSTATE_UPDATE_BIT);
 	}
 
 	/* update wake mask value? */
 	if (update_wake_mask != 0U) {
-		val |= (uint32_t)CSTATE_WAKE_MASK_UPDATE_BIT;
+		val |= CSTATE_WAKE_MASK_UPDATE_BIT;
 	}
 
 	/* set the updated cstate info */
-	return ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CSTATE_INFO, val,
-			wake_mask);
+	return ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CSTATE_INFO,
+				(uint32_t)val, wake_mask);
 }
 
 int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
@@ -299,10 +299,8 @@
 	int32_t ret, result;
 
 	/* check for allowed power state */
-	if ((state != TEGRA_ARI_CORE_C0) &&
-	    (state != TEGRA_ARI_CORE_C1) &&
-	    (state != TEGRA_ARI_CORE_C6) &&
-	    (state != TEGRA_ARI_CORE_C7)) {
+	if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
+	    (state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
 		ERROR("%s: unknown cstate (%d)\n", __func__, state);
 		result = EINVAL;
 	} else {
@@ -325,10 +323,10 @@
 
 int32_t ari_online_core(uint32_t ari_base, uint32_t core)
 {
-	uint64_t cpu = read_mpidr() & (uint64_t)(MPIDR_CPU_MASK);
-	uint64_t cluster = (read_mpidr() & (uint64_t)(MPIDR_CLUSTER_MASK)) >>
-			   (uint64_t)(MPIDR_AFFINITY_BITS);
-	uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
+	uint64_t cpu = read_mpidr() & (MPIDR_CPU_MASK);
+	uint64_t cluster = (read_mpidr() & (MPIDR_CLUSTER_MASK)) >>
+			   (MPIDR_AFFINITY_BITS);
+	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
 	int32_t ret;
 
 	/* construct the current CPU # */
@@ -342,8 +340,7 @@
 		/*
 		 * The Denver cluster has 2 CPUs only - 0, 1.
 		 */
-		if ((impl == (uint32_t)DENVER_IMPL) &&
-		    ((core == 2U) || (core == 3U))) {
+		if ((impl == DENVER_IMPL) && ((core == 2U) || (core == 3U))) {
 			ERROR("%s: unknown core id (%d)\n", __func__, core);
 			ret = EINVAL;
 		} else {
@@ -465,7 +462,7 @@
 {
 	int32_t ret = 0;
 	/* sanity check GSC ID */
-	if (gsc_idx > (uint32_t)TEGRA_ARI_GSC_VPR_IDX) {
+	if (gsc_idx > TEGRA_ARI_GSC_VPR_IDX) {
 		ret = EINVAL;
 	} else {
 		/* clean the previous response state */
@@ -497,8 +494,8 @@
 		uint64_t *data)
 {
 	int32_t ret, result;
-	uint32_t val;
-	uint8_t req_cmd, req_status;
+	uint32_t val, req_status;
+	uint8_t req_cmd;
 
 	req_cmd = (uint8_t)(req >> UNCORE_PERFMON_CMD_SHIFT);
 
@@ -523,7 +520,7 @@
 			result = ret;
 		} else {
 			/* read the command status value */
-			req_status = (uint8_t)ari_get_response_high(ari_base) &
+			req_status = ari_get_response_high(ari_base) &
 					 UNCORE_PERFMON_RESP_STATUS_MASK;
 
 			/*
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
index 828ad3c..e948e99 100644
--- a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
@@ -111,8 +111,8 @@
 static uint32_t mce_get_curr_cpu_ari_base(void)
 {
 	uint64_t mpidr = read_mpidr();
-	uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
-	uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
+	uint64_t cpuid = mpidr & MPIDR_CPU_MASK;
+	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
 
 	/*
 	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
@@ -131,9 +131,9 @@
 static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
 {
 	uint64_t mpidr = read_mpidr();
-	uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
-	uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
-			(uint64_t)MIDR_IMPL_MASK;
+	uint64_t cpuid = mpidr & MPIDR_CPU_MASK;
+	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) &
+			MIDR_IMPL_MASK;
 
 	/*
 	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
@@ -172,9 +172,6 @@
 	switch (cmd) {
 	case MCE_CMD_ENTER_CSTATE:
 		ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
-		if (ret < 0) {
-			ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);
-		}
 
 		break;
 
@@ -183,30 +180,22 @@
 		 * get the parameters required for the update cstate info
 		 * command
 		 */
-		arg3 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4));
-		arg4 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5));
-		arg5 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6));
+		arg3 = read_ctx_reg(gp_regs, CTX_GPREG_X4);
+		arg4 = read_ctx_reg(gp_regs, CTX_GPREG_X5);
+		arg5 = read_ctx_reg(gp_regs, CTX_GPREG_X6);
 
 		ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
 				(uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
 				(uint32_t)arg4, (uint8_t)arg5);
-		if (ret < 0) {
-			ERROR("%s: update_cstate_info failed(%d)\n",
-				__func__, ret);
-		}
 
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4), (0));
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5), (0));
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6), (0));
+		write_ctx_reg(gp_regs, CTX_GPREG_X4, (0ULL));
+		write_ctx_reg(gp_regs, CTX_GPREG_X5, (0ULL));
+		write_ctx_reg(gp_regs, CTX_GPREG_X6, (0ULL));
 
 		break;
 
 	case MCE_CMD_UPDATE_CROSSOVER_TIME:
 		ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
-		if (ret < 0) {
-			ERROR("%s: update_crossover_time failed(%d)\n",
-				__func__, ret);
-		}
 
 		break;
 
@@ -214,61 +203,40 @@
 		ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);
 
 		/* update context to return cstate stats value */
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (ret64));
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
+		write_ctx_reg(gp_regs, CTX_GPREG_X2, (ret64));
 
 		break;
 
 	case MCE_CMD_WRITE_CSTATE_STATS:
 		ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
-		if (ret < 0) {
-			ERROR("%s: write_cstate_stats failed(%d)\n",
-				__func__, ret);
-		}
 
 		break;
 
 	case MCE_CMD_IS_CCX_ALLOWED:
 		ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
-		if (ret < 0) {
-			ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret);
-			break;
-		}
 
 		/* update context to return CCx status value */
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
-			      (uint64_t)(ret));
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint64_t)(ret));
 
 		break;
 
 	case MCE_CMD_IS_SC7_ALLOWED:
 		ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
-		if (ret < 0) {
-			ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
-			break;
-		}
 
 		/* update context to return SC7 status value */
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
-			      (uint64_t)(ret));
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3),
-			      (uint64_t)(ret));
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint64_t)(ret));
+		write_ctx_reg(gp_regs, CTX_GPREG_X3, (uint64_t)(ret));
 
 		break;
 
 	case MCE_CMD_ONLINE_CORE:
 		ret = ops->online_core(cpu_ari_base, arg0);
-		if (ret < 0) {
-			ERROR("%s: online_core failed(%d)\n", __func__, ret);
-		}
 
 		break;
 
 	case MCE_CMD_CC3_CTRL:
 		ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
-		if (ret < 0) {
-			ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);
-		}
 
 		break;
 
@@ -277,10 +245,10 @@
 				arg0);
 
 		/* update context to return if echo'd data matched source */
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
-			      ((ret64 == arg0) ? 1ULL : 0ULL));
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
-			      ((ret64 == arg0) ? 1ULL : 0ULL));
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ((ret64 == arg0) ?
+			      1ULL : 0ULL));
+		write_ctx_reg(gp_regs, CTX_GPREG_X2, ((ret64 == arg0) ?
+			      1ULL : 0ULL));
 
 		break;
 
@@ -292,10 +260,8 @@
 		 * version = minor(63:32) | major(31:0). Update context
 		 * to return major and minor version number.
 		 */
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
-			      (ret64));
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
-			      (ret64 >> 32ULL));
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
+		write_ctx_reg(gp_regs, CTX_GPREG_X2, (ret64 >> 32ULL));
 
 		break;
 
@@ -304,32 +270,22 @@
 				TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);
 
 		/* update context to return features value */
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
 
 		break;
 
 	case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
 		ret = ops->roc_flush_cache_trbits(cpu_ari_base);
-		if (ret < 0) {
-			ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
-				ret);
-		}
 
 		break;
 
 	case MCE_CMD_ROC_FLUSH_CACHE:
 		ret = ops->roc_flush_cache(cpu_ari_base);
-		if (ret < 0) {
-			ERROR("%s: flush cache failed(%d)\n", __func__, ret);
-		}
 
 		break;
 
 	case MCE_CMD_ROC_CLEAN_CACHE:
 		ret = ops->roc_clean_cache(cpu_ari_base);
-		if (ret < 0) {
-			ERROR("%s: clean cache failed(%d)\n", __func__, ret);
-		}
 
 		break;
 
@@ -337,9 +293,9 @@
 		ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
 
 		/* update context to return MCA data/error */
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (arg1));
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
+		write_ctx_reg(gp_regs, CTX_GPREG_X2, (arg1));
+		write_ctx_reg(gp_regs, CTX_GPREG_X3, (ret64));
 
 		break;
 
@@ -347,8 +303,8 @@
 		ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
 
 		/* update context to return MCA error */
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
+		write_ctx_reg(gp_regs, CTX_GPREG_X3, (ret64));
 
 		break;
 
@@ -375,7 +331,7 @@
 		ret = ops->read_write_uncore_perfmon(cpu_ari_base, arg0, &arg1);
 
 		/* update context to return data */
-		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (arg1));
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, (arg1));
 		break;
 
 	case MCE_CMD_MISC_CCPLEX:
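
For reference, the version-reporting case above packs the MCE firmware version as
minor(63:32) | major(31:0) and hands the two halves back through X1/X2. A minimal
standalone sketch of that unpacking (the sample value is hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical version word: minor(63:32) | major(31:0) */
	uint64_t ret64 = ((uint64_t)2U << 32) | 18U;

	/* same split the handler writes back via CTX_GPREG_X1/X2 */
	uint32_t major = (uint32_t)ret64;
	uint32_t minor = (uint32_t)(ret64 >> 32);

	printf("MCE firmware version %u.%u (major.minor)\n", major, minor);
	return 0;
}
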
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
index 1ac3710..44ee8fb 100644
--- a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
@@ -200,15 +200,14 @@
 
 int32_t nvg_online_core(uint32_t ari_base, uint32_t core)
 {
-	uint64_t cpu = read_mpidr() & (uint64_t)MPIDR_CPU_MASK;
-	uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
-			(uint64_t)MIDR_IMPL_MASK;
+	uint64_t cpu = read_mpidr() & MPIDR_CPU_MASK;
+	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
 	int32_t ret = 0;
 
 	(void)ari_base;
 
 	/* sanity check code id */
-	if ((core >= (uint32_t)MCE_CORE_ID_MAX) || (cpu == core)) {
+	if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) {
 		ERROR("%s: unsupported core id (%d)\n", __func__, core);
 		ret = EINVAL;
 	} else {
diff --git a/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c
index fb94bce..7a9ce28 100644
--- a/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c
+++ b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c
@@ -12,6 +12,7 @@
 #include <common/bl_common.h>
 #include <common/debug.h>
 #include <context.h>
+#include <cortex_a57.h>
 #include <denver.h>
 #include <lib/el3_runtime/context_mgmt.h>
 #include <lib/psci/psci.h>
@@ -29,32 +30,33 @@
 extern uint32_t __tegra186_cpu_reset_handler_end,
 		__tegra186_smmu_context;
 
+/* TZDRAM offset for saving SMMU context */
+#define TEGRA186_SMMU_CTX_OFFSET	16UL
+
 /* state id mask */
-#define TEGRA186_STATE_ID_MASK		0xF
+#define TEGRA186_STATE_ID_MASK		0xFU
 /* constants to get power state's wake time */
-#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0
-#define TEGRA186_WAKE_TIME_SHIFT	4
+#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0U
+#define TEGRA186_WAKE_TIME_SHIFT	4U
 /* default core wake mask for CPU_SUSPEND */
-#define TEGRA186_CORE_WAKE_MASK		0x180c
+#define TEGRA186_CORE_WAKE_MASK		0x180cU
 /* context size to save during system suspend */
-#define TEGRA186_SE_CONTEXT_SIZE	3
+#define TEGRA186_SE_CONTEXT_SIZE	3U
 
 static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
-static struct t18x_psci_percpu_data {
-	unsigned int wake_time;
-} __aligned(CACHE_WRITEBACK_GRANULE) percpu_data[PLATFORM_CORE_COUNT];
+static struct tegra_psci_percpu_data {
+	uint32_t wake_time;
+} __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT];
 
-/* System power down state */
-uint32_t tegra186_system_powerdn_state = TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF;
-
-int32_t tegra_soc_validate_power_state(unsigned int power_state,
+int32_t tegra_soc_validate_power_state(uint32_t power_state,
 					psci_power_state_t *req_state)
 {
-	int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
-	int cpu = plat_my_core_pos();
+	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
+	uint32_t cpu = plat_my_core_pos();
+	int32_t ret = PSCI_E_SUCCESS;
 
 	/* save the core wake time (in TSC ticks)*/
-	percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
+	tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
 			<< TEGRA186_WAKE_TIME_SHIFT;
 
 	/*
@@ -64,8 +66,8 @@
 	 * from DRAM in that function, because the L2 cache is not flushed
 	 * unless the cluster is entering CC6/CC7.
 	 */
-	clean_dcache_range((uint64_t)&percpu_data[cpu],
-			sizeof(percpu_data[cpu]));
+	clean_dcache_range((uint64_t)&tegra_percpu_data[cpu],
+			sizeof(tegra_percpu_data[cpu]));
 
 	/* Sanity check the requested state id */
 	switch (state_id) {
@@ -80,18 +82,19 @@
 
 	default:
 		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
-		return PSCI_E_INVALID_PARAMS;
+		ret = PSCI_E_INVALID_PARAMS;
+		break;
 	}
 
-	return PSCI_E_SUCCESS;
+	return ret;
 }
 
-int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
 {
 	const plat_local_state_t *pwr_domain_state;
-	unsigned int stateid_afflvl0, stateid_afflvl2;
-	int cpu = plat_my_core_pos();
-	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+	uint8_t stateid_afflvl0, stateid_afflvl2;
+	uint32_t cpu = plat_my_core_pos();
+	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
 	mce_cstate_info_t cstate_info = { 0 };
 	uint64_t smmu_ctx_base;
 	uint32_t val;
@@ -109,8 +112,8 @@
 		/* Enter CPU idle/powerdown */
 		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
 			TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7;
-		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, val,
-				percpu_data[cpu].wake_time, 0);
+		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
+				tegra_percpu_data[cpu].wake_time, 0U);
 
 	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
 
@@ -138,18 +141,20 @@
 		cstate_info.system_state_force = 1;
 		cstate_info.update_wake_mask = 1;
 		mce_update_cstate_info(&cstate_info);
-
 		/* Loop until system suspend is allowed */
 		do {
-			val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
+			val = (uint32_t)mce_command_handler(
+					(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
 					TEGRA_ARI_CORE_C7,
 					MCE_CORE_SLEEP_TIME_INFINITE,
-					0);
-		} while (val == 0);
+					0U);
+		} while (val == 0U);
 
 		/* Instruct the MCE to enter system suspend state */
-		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
-			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
+		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
+			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
+	} else {
+		; /* do nothing */
 	}
 
 	return PSCI_E_SUCCESS;
@@ -159,23 +164,28 @@
  * Platform handler to calculate the proper target power level at the
  * specified affinity level
  ******************************************************************************/
-plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
+plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
 					     const plat_local_state_t *states,
-					     unsigned int ncpu)
+					     uint32_t ncpu)
 {
 	plat_local_state_t target = *states;
-	int cpu = plat_my_core_pos(), ret, cluster_powerdn = 1;
-	int core_pos = read_mpidr() & MPIDR_CPU_MASK;
+	uint32_t pos = 0;
+	plat_local_state_t result = PSCI_LOCAL_STATE_RUN;
+	uint32_t cpu = plat_my_core_pos(), num_cpu = ncpu;
+	int32_t ret, cluster_powerdn = 1;
+	uint64_t core_pos = read_mpidr() & (uint64_t)MPIDR_CPU_MASK;
 	mce_cstate_info_t cstate_info = { 0 };
 
 	/* get the power state at this level */
-	if (lvl == MPIDR_AFFLVL1)
-		target = *(states + core_pos);
-	if (lvl == MPIDR_AFFLVL2)
-		target = *(states + cpu);
+	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
+		target = states[core_pos];
+	}
+	if (lvl == (uint32_t)MPIDR_AFFLVL2) {
+		target = states[cpu];
+	}
 
 	/* CPU suspend */
-	if (lvl == MPIDR_AFFLVL1 && target == PSTATE_ID_CORE_POWERDN) {
+	if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PSTATE_ID_CORE_POWERDN)) {
 
 		/* Program default wake mask */
 		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
@@ -183,25 +193,29 @@
 		mce_update_cstate_info(&cstate_info);
 
 		/* Check if CCx state is allowed. */
-		ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED,
-				TEGRA_ARI_CORE_C7, percpu_data[cpu].wake_time,
-				0);
-		if (ret)
-			return PSTATE_ID_CORE_POWERDN;
+		ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
+				TEGRA_ARI_CORE_C7, tegra_percpu_data[cpu].wake_time,
+				0U);
+		if (ret != 0) {
+			result = PSTATE_ID_CORE_POWERDN;
+		}
 	}
 
 	/* CPU off */
-	if (lvl == MPIDR_AFFLVL1 && target == PLAT_MAX_OFF_STATE) {
+	if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PLAT_MAX_OFF_STATE)) {
 
 		/* find out the number of ON cpus in the cluster */
 		do {
-			target = *states++;
-			if (target != PLAT_MAX_OFF_STATE)
+			target = states[pos];
+			if (target != PLAT_MAX_OFF_STATE) {
 				cluster_powerdn = 0;
-		} while (--ncpu);
+			}
+			--num_cpu;
+			pos++;
+		} while (num_cpu != 0U);
 
 		/* Enable cluster powerdn from last CPU in the cluster */
-		if (cluster_powerdn) {
+		if (cluster_powerdn != 0) {
 
 			/* Enable CC7 state and turn off wake mask */
 			cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
@@ -209,12 +223,13 @@
 			mce_update_cstate_info(&cstate_info);
 
 			/* Check if CCx state is allowed. */
-			ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED,
+			ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
 						  TEGRA_ARI_CORE_C7,
 						  MCE_CORE_SLEEP_TIME_INFINITE,
-						  0);
-			if (ret)
-				return PSTATE_ID_CORE_POWERDN;
+						  0U);
+			if (ret != 0) {
+				result = PSTATE_ID_CORE_POWERDN;
+			}
 
 		} else {
 
@@ -225,20 +240,21 @@
 	}
 
 	/* System Suspend */
-	if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
-	    (target == PSTATE_ID_SOC_POWERDN))
-		return PSTATE_ID_SOC_POWERDN;
+	if (((lvl == (uint32_t)MPIDR_AFFLVL2) || (lvl == (uint32_t)MPIDR_AFFLVL1)) &&
+	    (target == PSTATE_ID_SOC_POWERDN)) {
+		result = PSTATE_ID_SOC_POWERDN;
+	}
 
 	/* default state */
-	return PSCI_LOCAL_STATE_RUN;
+	return result;
 }
 
-int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
+int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
 {
 	const plat_local_state_t *pwr_domain_state =
 		target_state->pwr_domain_state;
-	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
-	unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
+	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
 		TEGRA186_STATE_ID_MASK;
 	uint64_t val;
 
@@ -250,7 +266,7 @@
 		 */
 		val = params_from_bl2->tzdram_base +
 			((uintptr_t)&__tegra186_cpu_reset_handler_end -
-			 (uintptr_t)tegra186_cpu_reset_handler);
+			 (uintptr_t)&tegra186_cpu_reset_handler);
 		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
 			 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
 	}
@@ -258,32 +274,51 @@
 	return PSCI_E_SUCCESS;
 }
 
-int tegra_soc_pwr_domain_on(u_register_t mpidr)
+int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
 {
-	uint32_t target_cpu = mpidr & MPIDR_CPU_MASK;
+	uint32_t target_cpu = mpidr & (uint64_t)MPIDR_CPU_MASK;
 	uint32_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
-			MPIDR_AFFINITY_BITS;
+			(uint64_t)MPIDR_AFFINITY_BITS;
+	int32_t ret = PSCI_E_SUCCESS;
 
-	if (target_cluster > MPIDR_AFFLVL1) {
+	if (target_cluster > (uint64_t)MPIDR_AFFLVL1) {
+
 		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
-		return PSCI_E_NOT_PRESENT;
-	}
+		ret = PSCI_E_NOT_PRESENT;
 
-	/* construct the target CPU # */
-	target_cpu |= (target_cluster << 2);
+	} else {
+		/* construct the target CPU # */
+		target_cpu |= (target_cluster << 2);
 
-	mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0);
+		(void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
+	}
 
-	return PSCI_E_SUCCESS;
+	return ret;
 }
 
-int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
 {
-	int stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
-	int stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
+	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
+	uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
 	mce_cstate_info_t cstate_info = { 0 };
+	uint64_t impl, val;
+	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+
+	impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
 
 	/*
+	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
+	 * A02p and beyond).
+	 */
+	if ((plat_params->l2_ecc_parity_prot_dis != 1) &&
+	    (impl != (uint64_t)DENVER_IMPL)) {
+
+		val = read_l2ctlr_el1();
+		val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
+		write_l2ctlr_el1(val);
+	}
+
+	/*
 	 * Reset power state info for CPUs when onlining, we set
 	 * deepest power when offlining a core but that may not be
 	 * requested by non-secure sw which controls idle states. It
@@ -328,65 +363,28 @@
 	return PSCI_E_SUCCESS;
 }
 
-int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
 {
-	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
+
+	(void)target_state;
 
 	/* Disable Denver's DCO operations */
-	if (impl == DENVER_IMPL)
+	if (impl == DENVER_IMPL) {
 		denver_disable_dco();
+	}
 
 	/* Turn off CPU */
-	(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
-			MCE_CORE_SLEEP_TIME_INFINITE, 0);
+	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
+			MCE_CORE_SLEEP_TIME_INFINITE, 0U);
 
 	return PSCI_E_SUCCESS;
 }
 
 __dead2 void tegra_soc_prepare_system_off(void)
 {
-	mce_cstate_info_t cstate_info = { 0 };
-	uint32_t val;
-
-	if (tegra186_system_powerdn_state == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) {
-
-		/* power off the entire system */
-		mce_enter_ccplex_state(tegra186_system_powerdn_state);
-
-	} else if (tegra186_system_powerdn_state == TEGRA_ARI_SYSTEM_SC8) {
-
-		/* Prepare for quasi power down */
-		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
-		cstate_info.system = TEGRA_ARI_SYSTEM_SC8;
-		cstate_info.system_state_force = 1;
-		cstate_info.update_wake_mask = 1;
-		mce_update_cstate_info(&cstate_info);
-
-		/* loop until other CPUs power down */
-		do {
-			val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
-					TEGRA_ARI_CORE_C7,
-					MCE_CORE_SLEEP_TIME_INFINITE,
-					0);
-		} while (val == 0);
-
-		/* Enter quasi power down state */
-		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
-			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
-
-		/* disable GICC */
-		tegra_gic_cpuif_deactivate();
-
-		/* power down core */
-		prepare_cpu_pwr_dwn();
-
-		/* flush L1/L2 data caches */
-		dcsw_op_all(DCCISW);
-
-	} else {
-		ERROR("%s: unsupported power down state (%d)\n", __func__,
-			tegra186_system_powerdn_state);
-	}
+	/* power off the entire system */
+	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);
 
 	wfi();
 
@@ -396,7 +394,7 @@
 	}
 }
 
-int tegra_soc_prepare_system_reset(void)
+int32_t tegra_soc_prepare_system_reset(void)
 {
 	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
 
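
As a rough illustration of the wake-time handling in tegra_soc_validate_power_state()
above, a standalone sketch using the TEGRA186_WAKE_TIME_MASK/SHIFT values defined
earlier in this file's diff (the sample power_state is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* values copied from the diff above */
#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA186_WAKE_TIME_SHIFT	4U

int main(void)
{
	uint32_t power_state = 0x00000120U;	/* hypothetical CPU_SUSPEND argument */

	/* same expression the handler stores into tegra_percpu_data[cpu].wake_time */
	uint32_t wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
				<< TEGRA186_WAKE_TIME_SHIFT;

	printf("core wake time = %u TSC ticks\n", wake_time);
	return 0;
}
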
diff --git a/plat/nvidia/tegra/soc/t186/plat_secondary.c b/plat/nvidia/tegra/soc/t186/plat_secondary.c
index 4485e27..35a403b 100644
--- a/plat/nvidia/tegra/soc/t186/plat_secondary.c
+++ b/plat/nvidia/tegra/soc/t186/plat_secondary.c
@@ -14,14 +14,13 @@
 #include <tegra_def.h>
 #include <tegra_private.h>
 
-#define MISCREG_CPU_RESET_VECTOR	0x2000
-#define MISCREG_AA64_RST_LOW		0x2004
-#define MISCREG_AA64_RST_HIGH		0x2008
+#define MISCREG_AA64_RST_LOW		0x2004U
+#define MISCREG_AA64_RST_HIGH		0x2008U
 
-#define SCRATCH_SECURE_RSV1_SCRATCH_0	0x658
-#define SCRATCH_SECURE_RSV1_SCRATCH_1	0x65C
+#define SCRATCH_SECURE_RSV1_SCRATCH_0	0x658U
+#define SCRATCH_SECURE_RSV1_SCRATCH_1	0x65CU
 
-#define CPU_RESET_MODE_AA64		1
+#define CPU_RESET_MODE_AA64		1U
 
 extern void memcpy16(void *dest, const void *src, unsigned int length);
 
@@ -34,7 +33,7 @@
 void plat_secondary_setup(void)
 {
 	uint32_t addr_low, addr_high;
-	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
 	uint64_t cpu_reset_handler_base;
 
 	INFO("Setting up secondary CPU boot\n");
@@ -58,7 +57,7 @@
 	}
 
 	addr_low = (uint32_t)cpu_reset_handler_base | CPU_RESET_MODE_AA64;
-	addr_high = (uint32_t)((cpu_reset_handler_base >> 32) & 0x7ff);
+	addr_high = (uint32_t)((cpu_reset_handler_base >> 32U) & 0x7ffU);
 
 	/* write lower 32 bits first, then the upper 11 bits */
 	mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_LOW, addr_low);
@@ -71,5 +70,5 @@
 			addr_high);
 
 	/* update reset vector address to the CCPLEX */
-	mce_update_reset_vector();
+	(void)mce_update_reset_vector();
 }
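
To make the register split in plat_secondary_setup() above easier to follow, here is a
standalone sketch of how the 64-bit reset handler address is broken into the low word
(with the AArch64 reset mode bit OR'd in) and the 11-bit high word; the example address
is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define CPU_RESET_MODE_AA64	1U

int main(void)
{
	uint64_t cpu_reset_handler_base = 0x17BFFF000ULL;	/* hypothetical */

	/* same split written to MISCREG_AA64_RST_LOW/HIGH above */
	uint32_t addr_low  = (uint32_t)cpu_reset_handler_base | CPU_RESET_MODE_AA64;
	uint32_t addr_high = (uint32_t)((cpu_reset_handler_base >> 32U) & 0x7ffU);

	printf("RST_LOW = 0x%08x, RST_HIGH = 0x%03x\n", addr_low, addr_high);
	return 0;
}
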
diff --git a/plat/nvidia/tegra/soc/t186/plat_setup.c b/plat/nvidia/tegra/soc/t186/plat_setup.c
index 15dbd16..bbd19c1 100644
--- a/plat/nvidia/tegra/soc/t186/plat_setup.c
+++ b/plat/nvidia/tegra/soc/t186/plat_setup.c
@@ -27,15 +27,12 @@
 #include <tegra_platform.h>
 #include <tegra_private.h>
 
-DEFINE_RENAME_SYSREG_RW_FUNCS(l2ctlr_el1, CORTEX_A57_L2CTLR_EL1)
-extern uint64_t tegra_enable_l2_ecc_parity_prot;
-
 /*******************************************************************************
  * Tegra186 CPU numbers in cluster #0
  *******************************************************************************
  */
-#define TEGRA186_CLUSTER0_CORE2		2
-#define TEGRA186_CLUSTER0_CORE3		3
+#define TEGRA186_CLUSTER0_CORE2		2U
+#define TEGRA186_CLUSTER0_CORE3		3U
 
 /*******************************************************************************
  * The Tegra power domain tree has a single system level power domain i.e. a
@@ -43,7 +40,7 @@
  * the number of power domains at the highest power level.
  *******************************************************************************
  */
-const unsigned char tegra_power_domain_tree_desc[] = {
+const uint8_t tegra_power_domain_tree_desc[] = {
 	/* No of root nodes */
 	1,
 	/* No of clusters */
@@ -54,45 +51,53 @@
 	PLATFORM_MAX_CPUS_PER_CLUSTER
 };
 
+/*******************************************************************************
+ * This function returns the Tegra default topology tree information.
+ ******************************************************************************/
+const uint8_t *plat_get_power_domain_tree_desc(void)
+{
+	return tegra_power_domain_tree_desc;
+}
+
 /*
  * Table of regions to map using the MMU.
  */
 static const mmap_region_t tegra_mmap[] = {
-	MAP_REGION_FLAT(TEGRA_MISC_BASE, 0x10000, /* 64KB */
+	MAP_REGION_FLAT(TEGRA_MISC_BASE, 0x10000U, /* 64KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_TSA_BASE, 0x20000, /* 128KB */
+	MAP_REGION_FLAT(TEGRA_TSA_BASE, 0x20000U, /* 128KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_MC_STREAMID_BASE, 0x10000, /* 64KB */
+	MAP_REGION_FLAT(TEGRA_MC_STREAMID_BASE, 0x10000U, /* 64KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_MC_BASE, 0x10000, /* 64KB */
+	MAP_REGION_FLAT(TEGRA_MC_BASE, 0x10000U, /* 64KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000, /* 128KB - UART A, B*/
+	MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000U, /* 128KB - UART A, B*/
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_UARTC_BASE, 0x20000, /* 128KB - UART C, G */
+	MAP_REGION_FLAT(TEGRA_UARTC_BASE, 0x20000U, /* 128KB - UART C, G */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_UARTD_BASE, 0x30000, /* 192KB - UART D, E, F */
+	MAP_REGION_FLAT(TEGRA_UARTD_BASE, 0x30000U, /* 192KB - UART D, E, F */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_FUSE_BASE, 0x10000, /* 64KB */
+	MAP_REGION_FLAT(TEGRA_FUSE_BASE, 0x10000U, /* 64KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_GICD_BASE, 0x20000, /* 128KB */
+	MAP_REGION_FLAT(TEGRA_GICD_BASE, 0x20000U, /* 128KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_SE0_BASE, 0x10000, /* 64KB */
+	MAP_REGION_FLAT(TEGRA_SE0_BASE, 0x10000U, /* 64KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_PKA1_BASE, 0x10000, /* 64KB */
+	MAP_REGION_FLAT(TEGRA_PKA1_BASE, 0x10000U, /* 64KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_RNG1_BASE, 0x10000, /* 64KB */
+	MAP_REGION_FLAT(TEGRA_RNG1_BASE, 0x10000U, /* 64KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_CAR_RESET_BASE, 0x10000, /* 64KB */
+	MAP_REGION_FLAT(TEGRA_CAR_RESET_BASE, 0x10000U, /* 64KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_PMC_BASE, 0x40000, /* 256KB */
+	MAP_REGION_FLAT(TEGRA_PMC_BASE, 0x40000U, /* 256KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_SCRATCH_BASE, 0x10000, /* 64KB */
+	MAP_REGION_FLAT(TEGRA_SCRATCH_BASE, 0x10000U, /* 64KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_MMCRAB_BASE, 0x60000, /* 384KB */
+	MAP_REGION_FLAT(TEGRA_MMCRAB_BASE, 0x60000U, /* 384KB */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_ARM_ACTMON_CTR_BASE, 0x20000, /* 128KB - ARM/Denver */
+	MAP_REGION_FLAT(TEGRA_ARM_ACTMON_CTR_BASE, 0x20000U, /* 128KB - ARM/Denver */
 			MT_DEVICE | MT_RW | MT_SECURE),
-	MAP_REGION_FLAT(TEGRA_SMMU0_BASE, 0x1000000, /* 64KB */
+	MAP_REGION_FLAT(TEGRA_SMMU0_BASE, 0x1000000U, /* 16MB */
 			MT_DEVICE | MT_RW | MT_SECURE),
 	{0}
 };
@@ -109,7 +114,7 @@
 /*******************************************************************************
  * Handler to get the System Counter Frequency
  ******************************************************************************/
-unsigned int plat_get_syscnt_freq2(void)
+uint32_t plat_get_syscnt_freq2(void)
 {
 	return 31250000;
 }
@@ -136,56 +141,42 @@
 /*******************************************************************************
  * Retrieve the UART controller base to be used as the console
  ******************************************************************************/
-uint32_t plat_get_console_from_id(int id)
+uint32_t plat_get_console_from_id(int32_t id)
 {
-	if (id > TEGRA186_MAX_UART_PORTS)
-		return 0;
+	uint32_t ret;
 
-	return tegra186_uart_addresses[id];
-}
+	if (id > TEGRA186_MAX_UART_PORTS) {
+		ret = 0;
+	} else {
+		ret = tegra186_uart_addresses[id];
+	}
 
-/* represent chip-version as concatenation of major (15:12), minor (11:8) and subrev (7:0) */
-#define TEGRA186_VER_A02P	0x1201
+	return ret;
+}
 
 /*******************************************************************************
  * Handler for early platform setup
  ******************************************************************************/
 void plat_early_platform_setup(void)
 {
-	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
-	uint32_t chip_subrev, val;
+	uint64_t impl, val;
+	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
 
 	/* sanity check MCE firmware compatibility */
 	mce_verify_firmware_version();
 
+	impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
+
 	/*
-	 * Enable ECC and Parity Protection for Cortex-A57 CPUs
-	 * for Tegra A02p SKUs
+	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
+	 * A02p and beyond).
 	 */
-	if (impl != DENVER_IMPL) {
-
-		/* get the major, minor and sub-version values */
-		chip_subrev = mmio_read_32(TEGRA_FUSE_BASE + OPT_SUBREVISION) &
-			      SUBREVISION_MASK;
+	if ((plat_params->l2_ecc_parity_prot_dis != 1) &&
+	    (impl != (uint64_t)DENVER_IMPL)) {
 
-		/* prepare chip version number */
-		val = (tegra_get_chipid_major() << 12) |
-		      (tegra_get_chipid_minor() << 8) |
-		       chip_subrev;
-
-		/* enable L2 ECC for Tegra186 A02P and beyond */
-		if (val >= TEGRA186_VER_A02P) {
-
-			val = read_l2ctlr_el1();
-			val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
-			write_l2ctlr_el1(val);
-
-			/*
-			 * Set the flag to enable ECC/Parity Protection
-			 * when we exit System Suspend or Cluster Powerdn
-			 */
-			tegra_enable_l2_ecc_parity_prot = 1;
-		}
+		val = read_l2ctlr_el1();
+		val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
+		write_l2ctlr_el1(val);
 	}
 }
 
@@ -208,8 +199,9 @@
 	 * Initialize the FIQ handler only if the platform supports any
 	 * FIQ interrupt sources.
 	 */
-	if (sizeof(tegra186_interrupt_props) > 0)
+	if (sizeof(tegra186_interrupt_props) > 0U) {
 		tegra_fiq_handler_setup();
+	}
 }
 
 /*******************************************************************************
@@ -242,33 +234,34 @@
  * to convert an MPIDR to a unique linear index. An error code (-1) is returned
  * in case the MPIDR is invalid.
  ******************************************************************************/
-int plat_core_pos_by_mpidr(u_register_t mpidr)
+int32_t plat_core_pos_by_mpidr(u_register_t mpidr)
 {
-	unsigned int cluster_id, cpu_id, pos;
+	u_register_t cluster_id, cpu_id, pos;
+	int32_t ret;
 
-	cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
-	cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+	cluster_id = (mpidr >> (u_register_t)MPIDR_AFF1_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK;
+	cpu_id = (mpidr >> (u_register_t)MPIDR_AFF0_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK;
 
 	/*
 	 * Validate cluster_id by checking whether it represents
 	 * one of the two clusters present on the platform.
-	 */
-	if (cluster_id >= PLATFORM_CLUSTER_COUNT)
-		return PSCI_E_NOT_PRESENT;
-
-	/*
 	 * Validate cpu_id by checking whether it represents a CPU in
 	 * one of the two clusters present on the platform.
 	 */
-	if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)
-		return PSCI_E_NOT_PRESENT;
+	if ((cluster_id >= (u_register_t)PLATFORM_CLUSTER_COUNT) ||
+	    (cpu_id >= (u_register_t)PLATFORM_MAX_CPUS_PER_CLUSTER)) {
+		ret = PSCI_E_NOT_PRESENT;
+	} else {
+		/* calculate the core position */
+		pos = cpu_id + (cluster_id << 2U);
 
-	/* calculate the core position */
-	pos = cpu_id + (cluster_id << 2);
-
-	/* check for non-existent CPUs */
-	if (pos == TEGRA186_CLUSTER0_CORE2 || pos == TEGRA186_CLUSTER0_CORE3)
-		return PSCI_E_NOT_PRESENT;
+		/* check for non-existent CPUs */
+		if ((pos == TEGRA186_CLUSTER0_CORE2) || (pos == TEGRA186_CLUSTER0_CORE3)) {
+			ret = PSCI_E_NOT_PRESENT;
+		} else {
+			ret = (int32_t)pos;
+		}
+	}
 
-	return pos;
+	return ret;
 }
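
A standalone sketch of the MPIDR-to-linear-index mapping implemented by
plat_core_pos_by_mpidr() above: the position is cpu_id + (cluster_id << 2), and
positions 2 and 3 of cluster 0 are rejected as non-existent. The cluster count (2) and
slots per cluster (4) are assumptions inferred from the "T186 has 2 CPU clusters"
comment and the << 2 arithmetic; -1 stands in for PSCI_E_NOT_PRESENT.

#include <stdint.h>
#include <stdio.h>

#define PLATFORM_CLUSTER_COUNT		2U	/* assumed */
#define PLATFORM_MAX_CPUS_PER_CLUSTER	4U	/* assumed */
#define TEGRA186_CLUSTER0_CORE2		2U
#define TEGRA186_CLUSTER0_CORE3		3U

static int32_t core_pos(uint32_t cluster_id, uint32_t cpu_id)
{
	uint32_t pos;

	if ((cluster_id >= PLATFORM_CLUSTER_COUNT) ||
	    (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)) {
		return -1;
	}

	pos = cpu_id + (cluster_id << 2U);

	/* cluster 0 only exposes cores 0 and 1 */
	if ((pos == TEGRA186_CLUSTER0_CORE2) || (pos == TEGRA186_CLUSTER0_CORE3)) {
		return -1;
	}

	return (int32_t)pos;
}

int main(void)
{
	printf("cluster1/cpu3 -> %d, cluster0/cpu2 -> %d\n",
	       core_pos(1U, 3U), core_pos(0U, 2U));
	return 0;
}
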
diff --git a/plat/nvidia/tegra/soc/t186/plat_sip_calls.c b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c
index bf98fcf..955029e 100644
--- a/plat/nvidia/tegra/soc/t186/plat_sip_calls.c
+++ b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c
@@ -20,8 +20,6 @@
 #include <t18x_ari.h>
 #include <tegra_private.h>
 
-extern uint32_t tegra186_system_powerdn_state;
-
 /*******************************************************************************
  * Offset to read the ref_clk counter value
  ******************************************************************************/
@@ -30,7 +28,6 @@
 /*******************************************************************************
  * Tegra186 SiP SMCs
  ******************************************************************************/
-#define TEGRA_SIP_SYSTEM_SHUTDOWN_STATE			0xC2FFFE01
 #define TEGRA_SIP_GET_ACTMON_CLK_COUNTERS		0xC2FFFE02
 #define TEGRA_SIP_MCE_CMD_ENTER_CSTATE			0xC2FFFF00
 #define TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO		0xC2FFFF01
@@ -60,7 +57,7 @@
 		     uint64_t x2,
 		     uint64_t x3,
 		     uint64_t x4,
-		     void *cookie,
+		     const void *cookie,
 		     void *handle,
 		     uint64_t flags)
 {
@@ -115,33 +112,6 @@
 
 		return 0;
 
-	case TEGRA_SIP_SYSTEM_SHUTDOWN_STATE:
-
-		/* clean up the high bits */
-		x1 = (uint32_t)x1;
-
-		/*
-		 * SC8 is a special Tegra186 system state where the CPUs and
-		 * DRAM are powered down but the other subsystem is still
-		 * alive.
-		 */
-		if ((x1 == TEGRA_ARI_SYSTEM_SC8) ||
-		    (x1 == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF)) {
-
-			tegra186_system_powerdn_state = x1;
-			flush_dcache_range(
-				(uintptr_t)&tegra186_system_powerdn_state,
-				sizeof(tegra186_system_powerdn_state));
-
-		} else {
-
-			ERROR("%s: unhandled powerdn state (%d)\n", __func__,
-				(uint32_t)x1);
-			return -ENOTSUP;
-		}
-
-		return 0;
-
 	/*
 	 * This function ID reads the Activity monitor's core/ref clock
 	 * counter values for a core/cluster.
diff --git a/plat/nvidia/tegra/soc/t210/drivers/se/se_private.h b/plat/nvidia/tegra/soc/t210/drivers/se/se_private.h
new file mode 100644
index 0000000..0157747
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/drivers/se/se_private.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SE_PRIVATE_H
+#define SE_PRIVATE_H
+
+#include <stdbool.h>
+#include <security_engine.h>
+
+/*
+ * PMC registers
+ */
+
+/* Secure scratch registers */
+#define PMC_SECURE_SCRATCH4_OFFSET      	0xC0U
+#define PMC_SECURE_SCRATCH5_OFFSET      	0xC4U
+#define PMC_SECURE_SCRATCH6_OFFSET      	0x224U
+#define PMC_SECURE_SCRATCH7_OFFSET      	0x228U
+#define PMC_SECURE_SCRATCH120_OFFSET    	0xB38U
+#define PMC_SECURE_SCRATCH121_OFFSET    	0xB3CU
+#define PMC_SECURE_SCRATCH122_OFFSET    	0xB40U
+#define PMC_SECURE_SCRATCH123_OFFSET    	0xB44U
+
+/*
+ * AHB arbitration memory write queue
+ */
+#define ARAHB_MEM_WRQUE_MST_ID_OFFSET		0xFCU
+#define ARAHB_MST_ID_SE2_MASK			(0x1U << 13)
+#define ARAHB_MST_ID_SE_MASK			(0x1U << 14)
+
+/* SE Status register */
+#define SE_STATUS_OFFSET			0x800U
+#define SE_STATUS_SHIFT				0
+#define SE_STATUS_IDLE	\
+		((0U) << SE_STATUS_SHIFT)
+#define SE_STATUS_BUSY	\
+		((1U) << SE_STATUS_SHIFT)
+#define SE_STATUS(x)	\
+		((x) & ((0x3U) << SE_STATUS_SHIFT))
+
+/* SE config register */
+#define SE_CONFIG_REG_OFFSET    		0x14U
+#define SE_CONFIG_ENC_ALG_SHIFT 		12
+#define SE_CONFIG_ENC_ALG_AES_ENC	\
+		((1U) << SE_CONFIG_ENC_ALG_SHIFT)
+#define SE_CONFIG_ENC_ALG_RNG	\
+		((2U) << SE_CONFIG_ENC_ALG_SHIFT)
+#define SE_CONFIG_ENC_ALG_SHA	\
+		((3U) << SE_CONFIG_ENC_ALG_SHIFT)
+#define SE_CONFIG_ENC_ALG_RSA	\
+		((4U) << SE_CONFIG_ENC_ALG_SHIFT)
+#define SE_CONFIG_ENC_ALG_NOP	\
+		((0U) << SE_CONFIG_ENC_ALG_SHIFT)
+#define SE_CONFIG_ENC_ALG(x)	\
+		((x) & ((0xFU) << SE_CONFIG_ENC_ALG_SHIFT))
+
+#define SE_CONFIG_DEC_ALG_SHIFT 		8
+#define SE_CONFIG_DEC_ALG_AES	\
+		((1U) << SE_CONFIG_DEC_ALG_SHIFT)
+#define SE_CONFIG_DEC_ALG_NOP	\
+		((0U) << SE_CONFIG_DEC_ALG_SHIFT)
+#define SE_CONFIG_DEC_ALG(x)	\
+		((x) & ((0xFU) << SE_CONFIG_DEC_ALG_SHIFT))
+
+#define SE_CONFIG_DST_SHIFT     		2
+#define SE_CONFIG_DST_MEMORY	\
+		((0U) << SE_CONFIG_DST_SHIFT)
+#define SE_CONFIG_DST_HASHREG	\
+		((1U) << SE_CONFIG_DST_SHIFT)
+#define SE_CONFIG_DST_KEYTAB	\
+		((2U) << SE_CONFIG_DST_SHIFT)
+#define SE_CONFIG_DST_SRK	\
+		((3U) << SE_CONFIG_DST_SHIFT)
+#define SE_CONFIG_DST_RSAREG	\
+		((4U) << SE_CONFIG_DST_SHIFT)
+#define SE_CONFIG_DST(x)	\
+		((x) & ((0x7U) << SE_CONFIG_DST_SHIFT))
+
+/* DRBG random number generator config */
+#define SE_RNG_CONFIG_REG_OFFSET		0x340
+
+#define DRBG_MODE_SHIFT				0
+#define DRBG_MODE_NORMAL		\
+		((0UL) << DRBG_MODE_SHIFT)
+#define DRBG_MODE_FORCE_INSTANTION  \
+		((1UL) << DRBG_MODE_SHIFT)
+#define DRBG_MODE_FORCE_RESEED	  \
+		((2UL) << DRBG_MODE_SHIFT)
+#define SE_RNG_CONFIG_MODE(x)   \
+		((x) & ((0x3UL) << DRBG_MODE_SHIFT))
+
+#define DRBG_SRC_SHIFT				2
+#define DRBG_SRC_NONE	   \
+		((0UL) << DRBG_SRC_SHIFT)
+#define DRBG_SRC_ENTROPY	\
+		((1UL) << DRBG_SRC_SHIFT)
+#define DRBG_SRC_LFSR	   \
+		((2UL) << DRBG_SRC_SHIFT)
+#define SE_RNG_SRC_CONFIG_MODE(x)   \
+		((x) & ((0x3UL) << DRBG_SRC_SHIFT))
+
+/* DRBG random number generator entropy config */
+#define SE_RNG_SRC_CONFIG_REG_OFFSET		0x344U
+
+#define DRBG_RO_ENT_SRC_SHIFT       		1
+#define DRBG_RO_ENT_SRC_ENABLE	\
+		((1U) << DRBG_RO_ENT_SRC_SHIFT)
+#define DRBG_RO_ENT_SRC_DISABLE	\
+		((0U) << DRBG_RO_ENT_SRC_SHIFT)
+#define SE_RNG_SRC_CONFIG_RO_ENT_SRC(x)	\
+		((x) & ((0x1U) << DRBG_RO_ENT_SRC_SHIFT))
+
+#define DRBG_RO_ENT_SRC_LOCK_SHIFT  		0
+#define DRBG_RO_ENT_SRC_LOCK_ENABLE	\
+		((1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT)
+#define DRBG_RO_ENT_SRC_LOCK_DISABLE	\
+		((0U) << DRBG_RO_ENT_SRC_LOCK_SHIFT)
+#define SE_RNG_SRC_CONFIG_RO_ENT_SRC_LOCK(x)	\
+		((x) & ((0x1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT))
+
+#define DRBG_RO_ENT_IGNORE_MEM_SHIFT		12
+#define DRBG_RO_ENT_IGNORE_MEM_ENABLE	\
+		((1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT)
+#define DRBG_RO_ENT_IGNORE_MEM_DISABLE	\
+		((0U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT)
+#define SE_RNG_SRC_CONFIG_RO_ENT_IGNORE_MEM(x)	\
+		((x) & ((0x1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT))
+
+/* SE OPERATION */
+#define SE_OPERATION_REG_OFFSET 		0x8U
+#define SE_OPERATION_SHIFT      		0
+#define SE_OP_ABORT	\
+		((0x0U) << SE_OPERATION_SHIFT)
+#define SE_OP_START	\
+		((0x1U) << SE_OPERATION_SHIFT)
+#define SE_OP_RESTART	\
+		((0x2U) << SE_OPERATION_SHIFT)
+#define SE_OP_CTX_SAVE	\
+		((0x3U) << SE_OPERATION_SHIFT)
+#define SE_OP_RESTART_IN	\
+		((0x4U) << SE_OPERATION_SHIFT)
+#define SE_OPERATION(x)	\
+		((x) & ((0x7U) << SE_OPERATION_SHIFT))
+
+/* SE_CTX_SAVE_AUTO */
+#define SE_CTX_SAVE_AUTO_REG_OFFSET 		0x74U
+
+/* Enable */
+#define SE_CTX_SAVE_AUTO_ENABLE_SHIFT  		0
+#define SE_CTX_SAVE_AUTO_DIS	\
+		((0U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT)
+#define SE_CTX_SAVE_AUTO_EN	\
+		((1U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT)
+#define SE_CTX_SAVE_AUTO_ENABLE(x)	\
+		((x) & ((0x1U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT))
+
+/* Lock */
+#define SE_CTX_SAVE_AUTO_LOCK_SHIFT 		8
+#define SE_CTX_SAVE_AUTO_LOCK_EN	\
+		((1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT)
+#define SE_CTX_SAVE_AUTO_LOCK_DIS	\
+		((0U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT)
+#define SE_CTX_SAVE_AUTO_LOCK(x)	\
+		((x) & ((0x1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT))
+
+/* Current context save number of blocks  */
+#define SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT		16
+#define SE_CTX_SAVE_AUTO_CURR_CNT_MASK 		0x3FFU
+#define SE_CTX_SAVE_GET_BLK_COUNT(x)	\
+		(((x) >> SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT) & \
+		SE_CTX_SAVE_AUTO_CURR_CNT_MASK)
+
+#define SE_CTX_SAVE_SIZE_BLOCKS_SE1      	133
+#define SE_CTX_SAVE_SIZE_BLOCKS_SE2     	646
+
+/* SE TZRAM OPERATION - only for SE1 */
+#define SE_TZRAM_OPERATION      		0x540U
+
+#define SE_TZRAM_OP_MODE_SHIFT  		1
+#define SE_TZRAM_OP_MODE_SAVE		\
+		((0U) << SE_TZRAM_OP_MODE_SHIFT)
+#define SE_TZRAM_OP_MODE_RESTORE	\
+		((1U) << SE_TZRAM_OP_MODE_SHIFT)
+#define SE_TZRAM_OP_MODE(x)		\
+		((x) & ((0x1U) << SE_TZRAM_OP_MODE_SHIFT))
+
+#define SE_TZRAM_OP_BUSY_SHIFT  		2
+#define SE_TZRAM_OP_BUSY_OFF	\
+		((0U) << SE_TZRAM_OP_BUSY_SHIFT)
+#define SE_TZRAM_OP_BUSY_ON	\
+		((1U) << SE_TZRAM_OP_BUSY_SHIFT)
+#define SE_TZRAM_OP_BUSY(x)	\
+		((x) & ((0x1U) << SE_TZRAM_OP_BUSY_SHIFT))
+
+#define SE_TZRAM_OP_REQ_SHIFT  			0
+#define SE_TZRAM_OP_REQ_IDLE	\
+		((0U) << SE_TZRAM_OP_REQ_SHIFT)
+#define SE_TZRAM_OP_REQ_INIT	\
+		((1U) << SE_TZRAM_OP_REQ_SHIFT)
+#define SE_TZRAM_OP_REQ(x)	\
+		((x) & ((0x1U) << SE_TZRAM_OP_REQ_SHIFT))
+
+/* SE Interrupt */
+#define SE_INT_STATUS_REG_OFFSET		0x10U
+#define SE_INT_OP_DONE_SHIFT    		4
+#define SE_INT_OP_DONE_CLEAR	\
+		((0U) << SE_INT_OP_DONE_SHIFT)
+#define SE_INT_OP_DONE_ACTIVE	\
+		((1U) << SE_INT_OP_DONE_SHIFT)
+#define SE_INT_OP_DONE(x)	\
+		((x) & ((0x1U) << SE_INT_OP_DONE_SHIFT))
+
+/* SE error status */
+#define SE_ERR_STATUS_REG_OFFSET		0x804U
+
+/* SE linked list (LL) register */
+#define SE_IN_LL_ADDR_REG_OFFSET		0x18U
+#define SE_OUT_LL_ADDR_REG_OFFSET  		0x24U
+#define SE_BLOCK_COUNT_REG_OFFSET  		0x318U
+
+/* AES data sizes */
+#define TEGRA_SE_AES_BLOCK_SIZE 		16
+#define TEGRA_SE_AES_MIN_KEY_SIZE  		16
+#define TEGRA_SE_AES_MAX_KEY_SIZE  		32
+#define TEGRA_SE_AES_IV_SIZE    		16
+
+/*******************************************************************************
+ * Inline functions definition
+ ******************************************************************************/
+
+static inline uint32_t tegra_se_read_32(const tegra_se_dev_t *dev, uint32_t offset)
+{
+	return mmio_read_32(dev->se_base + offset);
+}
+
+static inline void tegra_se_write_32(const tegra_se_dev_t *dev, uint32_t offset, uint32_t val)
+{
+	mmio_write_32(dev->se_base + offset, val);
+}
+
+/*******************************************************************************
+ * Prototypes
+ ******************************************************************************/
+
+#endif /* SE_PRIVATE_H */
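
The header above follows a consistent shift/value/extract pattern for each register
field; a short standalone sketch of reading the SE_CTX_SAVE_AUTO block-count field with
those macros (the raw register value is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* copied from the header above */
#define SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT		16
#define SE_CTX_SAVE_AUTO_CURR_CNT_MASK 		0x3FFU
#define SE_CTX_SAVE_GET_BLK_COUNT(x)	\
		(((x) >> SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT) & \
		SE_CTX_SAVE_AUTO_CURR_CNT_MASK)

int main(void)
{
	uint32_t reg = 0x00850101U;	/* hypothetical SE_CTX_SAVE_AUTO readback */

	/* bits [25:16] carry the number of blocks written so far */
	printf("blocks written = %u\n", SE_CTX_SAVE_GET_BLK_COUNT(reg));
	return 0;
}
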
diff --git a/plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c b/plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c
new file mode 100644
index 0000000..fa99db6
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/debug.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <mmio.h>
+#include <psci.h>
+#include <se_private.h>
+#include <security_engine.h>
+#include <tegra_platform.h>
+
+/*******************************************************************************
+ * Constants and Macros
+ ******************************************************************************/
+
+#define TIMEOUT_100MS	100UL	// Timeout in 100ms
+
+/*******************************************************************************
+ * Data structure and global variables
+ ******************************************************************************/
+
+/* The security engine contexts are formatted as follows:
+ *
+ * SE1 CONTEXT:
+ * #--------------------------------#
+ * |        Random Data   1 Block   |
+ * #--------------------------------#
+ * |        Sticky Bits   2 Blocks  |
+ * #--------------------------------#
+ * | Key Table           64 Blocks  |
+ * |     For each Key (x16):        |
+ * |      Key:         2 Blocks     |
+ * |      Original-IV: 1 Block      |
+ * |      Updated-IV:  1 Block      |
+ * #--------------------------------#
+ * |        RSA Keys     64 Blocks  |
+ * #--------------------------------#
+ * |        Known Pattern 1 Block   |
+ * #--------------------------------#
+ *
+ * SE2/PKA1 CONTEXT:
+ * #--------------------------------#
+ * |        Random Data   1 Block   |
+ * #--------------------------------#
+ * |        Sticky Bits   2 Blocks  |
+ * #--------------------------------#
+ * | Key Table           64 Blocks  |
+ * |     For each Key (x16):        |
+ * |      Key:         2 Blocks     |
+ * |      Original-IV: 1 Block      |
+ * |      Updated-IV:  1 Block      |
+ * #--------------------------------#
+ * |        RSA Keys     64 Blocks  |
+ * #--------------------------------#
+ * |        PKA sticky bits 1 Block |
+ * #--------------------------------#
+ * |        PKA keys    512 Blocks  |
+ * #--------------------------------#
+ * |        Known Pattern 1 Block   |
+ * #--------------------------------#
+ */
+
+/* SE input and output linked list buffers */
+static tegra_se_io_lst_t se1_src_ll_buf;
+static tegra_se_io_lst_t se1_dst_ll_buf;
+
+/* SE2 input and output linked list buffers */
+static tegra_se_io_lst_t se2_src_ll_buf;
+static tegra_se_io_lst_t se2_dst_ll_buf;
+
+/* SE1 security engine device handle */
+static tegra_se_dev_t se_dev_1 = {
+	.se_num = 1,
+	/* setup base address for se */
+	.se_base = TEGRA_SE1_BASE,
+	/* Setup context size in AES blocks */
+	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
+	/* Setup SRC buffers for SE operations */
+	.src_ll_buf = &se1_src_ll_buf,
+	/* Setup DST buffers for SE operations */
+	.dst_ll_buf = &se1_dst_ll_buf,
+};
+
+/* SE2 security engine device handle */
+static tegra_se_dev_t se_dev_2 = {
+	.se_num = 2,
+	/* setup base address for se */
+	.se_base = TEGRA_SE2_BASE,
+	/* Setup context size in AES blocks */
+	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
+	/* Setup SRC buffers for SE operations */
+	.src_ll_buf = &se2_src_ll_buf,
+	/* Setup DST buffers for SE operations */
+	.dst_ll_buf = &se2_dst_ll_buf,
+};
+
+/*******************************************************************************
+ * Functions Definition
+ ******************************************************************************/
+
+static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev)
+{
+	flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)),
+			sizeof(tegra_se_io_lst_t));
+	flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)),
+			sizeof(tegra_se_io_lst_t));
+}
+
+/*
+ * Check that SE operation has completed after kickoff
+ * This function is invoked after an SE operation has been started,
+ * and it checks the following conditions:
+ * 1. SE_INT_STATUS = SE_OP_DONE
+ * 2. SE_STATUS = IDLE
+ * 3. AHB bus data transfer complete.
+ * 4. SE_ERR_STATUS is clean.
+ */
+static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
+{
+	uint32_t val = 0;
+	int32_t ret = 0;
+	uint32_t timeout;
+
+	/* Poll the SE interrupt register to ensure H/W operation complete */
+	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
+	for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
+			(timeout < TIMEOUT_100MS); timeout++) {
+		mdelay(1);
+		val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
+	}
+
+	if (timeout == TIMEOUT_100MS) {
+		ERROR("%s: ERR: Atomic context save operation timeout!\n",
+				__func__);
+		ret = -ETIMEDOUT;
+	}
+
+	/* Poll the SE status idle to ensure H/W operation complete */
+	if (ret == 0) {
+		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
+		for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS);
+				timeout++) {
+			mdelay(1);
+			val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
+		}
+
+		if (timeout == TIMEOUT_100MS) {
+			ERROR("%s: ERR: MEM_INTERFACE and SE state "
+					"idle state timeout.\n", __func__);
+			ret = -ETIMEDOUT;
+		}
+	}
+
+	/* Check AHB bus transfer complete */
+	if (ret == 0) {
+		val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
+		for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) &&
+				(timeout < TIMEOUT_100MS); timeout++) {
+			mdelay(1);
+			val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
+		}
+
+		if (timeout == TIMEOUT_100MS) {
+			ERROR("%s: SE write over AHB timeout.\n", __func__);
+			ret = -ETIMEDOUT;
+		}
+	}
+
+	/* Ensure that no errors are thrown during operation */
+	if (ret == 0) {
+		val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET);
+		if (val != 0U) {
+			ERROR("%s: error during SE operation! 0x%x\n", __func__, val);
+			ret = -ENOTSUP;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Verify the SE context save auto has been enabled.
+ * SE_CTX_SAVE_AUTO.ENABLE == ENABLE
+ * If the SE context save auto is not enabled, then set
+ * the context save auto enable and lock the setting.
+ * If the SE context save auto is not enabled and the
+ * enable setting is locked, then return an error.
+ */
+static inline int32_t tegra_se_ctx_save_auto_enable(const tegra_se_dev_t *se_dev)
+{
+	uint32_t val;
+	int32_t ret = 0;
+
+	val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
+	if (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_DIS) {
+		if (SE_CTX_SAVE_AUTO_LOCK(val) == SE_CTX_SAVE_AUTO_LOCK_EN) {
+			ERROR("%s: ERR: Cannot enable atomic. Write locked!\n",
+					__func__);
+			ret = -EACCES;
+		}
+
+		/* Program SE_CTX_SAVE_AUTO */
+		if (ret == 0) {
+			tegra_se_write_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET,
+					SE_CTX_SAVE_AUTO_LOCK_EN |
+					SE_CTX_SAVE_AUTO_EN);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Wait for SE engine to be idle and clear pending interrupts before
+ * starting the next SE operation.
+ */
+static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev)
+{
+	int32_t ret = 0;
+	uint32_t val = 0;
+	uint32_t timeout;
+
+	/* Wait for previous operation to finish */
+	val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
+	for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
+		mdelay(1);
+		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
+	}
+
+	if (timeout == TIMEOUT_100MS) {
+		ERROR("%s: ERR: SE status is not idle!\n", __func__);
+		ret = -ETIMEDOUT;
+	}
+
+	/* Clear any pending interrupts from the previous operation */
+	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
+	tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val);
+	return ret;
+}
+
+/*
+ * SE atomic context save. At SC7 entry, the SE driver triggers the
+ * hardware to automatically perform the context save operation.
+ */
+static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
+{
+	int32_t ret = 0;
+	uint32_t val = 0;
+	uint32_t blk_count_limit = 0;
+	uint32_t block_count;
+
+	/* Check that previous operation is finalized */
+	ret = tegra_se_operation_prepare(se_dev);
+
+	/* Ensure HW atomic context save has been enabled
+	 * This should have been done at boot time.
+	 * SE_CTX_SAVE_AUTO.ENABLE == ENABLE
+	 */
+	if (ret == 0) {
+		ret = tegra_se_ctx_save_auto_enable(se_dev);
+	}
+
+	/* Read the context save progress counter: block_count
+	 * Ensure no previous context save has been triggered
+	 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
+	 */
+	if (ret == 0) {
+		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
+		block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
+		if (block_count != 0U) {
+			ERROR("%s: ctx_save triggered multiple times\n",
+					__func__);
+			ret = -EALREADY;
+		}
+	}
+
+	/* Set the destination block count for when the context save completes */
+	if (ret == 0) {
+		blk_count_limit = block_count + se_dev->ctx_size_blks;
+	}
+
+	/* Program SE_CONFIG register as for RNG operation
+	 * SE_CONFIG.ENC_ALG = RNG
+	 * SE_CONFIG.DEC_ALG = NOP
+	 * SE_CONFIG.ENC_MODE is ignored
+	 * SE_CONFIG.DEC_MODE is ignored
+	 * SE_CONFIG.DST = MEMORY
+	 */
+	if (ret == 0) {
+		val = (SE_CONFIG_ENC_ALG_RNG |
+			SE_CONFIG_DEC_ALG_NOP |
+			SE_CONFIG_DST_MEMORY);
+		tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
+
+		tegra_se_make_data_coherent(se_dev);
+
+		/* SE_CTX_SAVE operation */
+		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
+				SE_OP_CTX_SAVE);
+
+		ret = tegra_se_operation_complete(se_dev);
+	}
+
+	/* Check that context has written the correct number of blocks */
+	if (ret == 0) {
+		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
+		if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
+			ERROR("%s: expected %d blocks but %d were written\n",
+					__func__, blk_count_limit, val);
+			ret = -ECANCELED;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Security engine primitive operations, including normal operation
+ * and the context save operation.
+ */
+static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
+{
+	uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
+	int ret = 0;
+
+	assert(se_dev);
+
+	/* Use device buffers for in and out */
+	tegra_se_write_32(se_dev, SE_OUT_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->dst_ll_buf)));
+	tegra_se_write_32(se_dev, SE_IN_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->src_ll_buf)));
+
+	/* Check that previous operation is finalized */
+	ret = tegra_se_operation_prepare(se_dev);
+	if (ret != 0) {
+		goto op_error;
+	}
+
+	/* Program SE operation size */
+	if (nblocks) {
+		tegra_se_write_32(se_dev, SE_BLOCK_COUNT_REG_OFFSET, nblocks - 1);
+	}
+
+	/* Make SE LL data coherent before the SE operation */
+	tegra_se_make_data_coherent(se_dev);
+
+	/* Start hardware operation */
+	tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);
+
+	/* Wait for operation to finish */
+	ret = tegra_se_operation_complete(se_dev);
+
+op_error:
+	return ret;
+}
+
+/*
+ * Security Engine sequence to generate SRK
+ * SE1 and SE2 will generate different SRKs from different
+ * entropy seeds.
+ */
+static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
+{
+	int ret = PSCI_E_INTERN_FAIL;
+	uint32_t val;
+
+	/* Configure the following hardware register settings:
+	 * SE_CONFIG.DEC_ALG = NOP
+	 * SE_CONFIG.ENC_ALG = RNG
+	 * SE_CONFIG.DST = SRK
+	 * SE_OPERATION.OP = START
+	 * SE_CRYPTO_LAST_BLOCK = 0
+	 */
+	se_dev->src_ll_buf->last_buff_num = 0;
+	se_dev->dst_ll_buf->last_buff_num = 0;
+
+	/* Configure random number generator */
+	val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
+	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
+
+	/* Configure output destination = SRK */
+	val = (SE_CONFIG_ENC_ALG_RNG |
+		SE_CONFIG_DEC_ALG_NOP |
+		SE_CONFIG_DST_SRK);
+	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
+
+	/* Perform hardware operation */
+	ret = tegra_se_perform_operation(se_dev, 0);
+
+	return ret;
+}
+
+/*
+ * Initialize the SE engine handle
+ */
+void tegra_se_init(void)
+{
+	INFO("%s: start SE init\n", __func__);
+
+	/* Generate random SRK to initialize DRBG */
+	tegra_se_generate_srk(&se_dev_1);
+	tegra_se_generate_srk(&se_dev_2);
+
+	INFO("%s: SE init done\n", __func__);
+}
+
+/*
+ * Security engine power suspend entry point.
+ * This function is invoked from PSCI power domain suspend handler.
+ */
+int32_t tegra_se_suspend(void)
+{
+	int32_t ret = 0;
+
+	/* Atomic context save se2 and pka1 */
+	INFO("%s: SE2/PKA1 atomic context save\n", __func__);
+	ret = tegra_se_context_save_atomic(&se_dev_2);
+
+	/* Atomic context save se */
+	if (ret == 0) {
+		INFO("%s: SE1 atomic context save\n", __func__);
+		ret = tegra_se_context_save_atomic(&se_dev_1);
+	}
+
+	if (ret == 0) {
+		INFO("%s: SE atomic context save done\n", __func__);
+	}
+
+	return ret;
+}
+
+/*
+ * Save TZRAM to shadow TZRAM in AON
+ */
+int32_t tegra_se_save_tzram(void)
+{
+	uint32_t val = 0;
+	int32_t ret = 0;
+	uint32_t timeout;
+
+	INFO("%s: SE TZRAM save start\n", __func__);
+
+	val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
+	tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);
+
+	val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
+	for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
+			(timeout < TIMEOUT_100MS); timeout++) {
+		mdelay(1);
+		val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
+	}
+
+	if (timeout == TIMEOUT_100MS) {
+		ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
+		ret = -ETIMEDOUT;
+	}
+
+	if (ret == 0) {
+		INFO("%s: SE TZRAM save done!\n", __func__);
+	}
+
+	return ret;
+}
+
+/*
+ * This function is invoked by the SE resume path
+ */
+static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
+{
+	uint32_t val;
+
+	assert(se_dev);
+
+	/* Lock RNG source to ENTROPY on resume */
+	val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
+		DRBG_RO_ENT_SRC_LOCK_ENABLE |
+		DRBG_RO_ENT_SRC_ENABLE;
+	tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);
+
+	/* Enable and lock the SE atomic context save setting */
+	if (tegra_se_ctx_save_auto_enable(se_dev) != 0) {
+		ERROR("%s: ERR: enable SE%d context save auto failed!\n",
+			__func__, se_dev->se_num);
+	}
+
+	/* Set a random value to SRK to initialize DRBG */
+	tegra_se_generate_srk(se_dev);
+}
+
+/*
+ * The function is invoked on SC7 resume
+ */
+void tegra_se_resume(void)
+{
+	tegra_se_warm_boot_resume(&se_dev_1);
+	tegra_se_warm_boot_resume(&se_dev_2);
+}
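
Several routines in the driver above (tegra_se_operation_complete(),
tegra_se_operation_prepare(), tegra_se_save_tzram()) share the same bounded-polling
idiom: re-read a status register once per millisecond and give up with -ETIMEDOUT after
TIMEOUT_100MS iterations. A self-contained sketch of that pattern, with the register
read and mdelay() replaced by hypothetical stubs:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define TIMEOUT_100MS	100UL

/* hypothetical stand-ins for the SE status read and mdelay(1) */
static uint32_t read_busy(void)
{
	static uint32_t countdown = 3U;	/* pretend the HW goes idle after 3 polls */
	return (countdown > 0U) ? countdown-- : 0U;
}

static void delay_1ms(void)
{
	/* mdelay(1) in the real driver */
}

static int32_t poll_until_idle(void)
{
	uint32_t val, timeout;

	val = read_busy();
	for (timeout = 0U; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
		delay_1ms();
		val = read_busy();
	}

	return (timeout == TIMEOUT_100MS) ? -ETIMEDOUT : 0;
}

int main(void)
{
	printf("poll result: %d\n", poll_until_idle());
	return 0;
}
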
diff --git a/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
index 27786d3..ed30ff4 100644
--- a/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
+++ b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
@@ -1,13 +1,11 @@
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <assert.h>
-
-#include <platform_def.h>
-
+#include <cortex_a57.h>
 #include <arch_helpers.h>
 #include <common/debug.h>
 #include <drivers/delay_timer.h>
@@ -15,10 +13,14 @@
 #include <lib/psci/psci.h>
 #include <plat/common/platform.h>
 
+#include <bpmp.h>
 #include <flowctrl.h>
 #include <pmc.h>
+#include <platform_def.h>
+#include <security_engine.h>
 #include <tegra_def.h>
 #include <tegra_private.h>
+#include <tegra_platform.h>
 
 /*
  * Register used to clear CPU reset signals. Each CPU has two reset
@@ -55,7 +57,7 @@
 		 * Cluster powerdown/idle request only for afflvl 1
 		 */
 		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
-		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PSTATE_ID_CORE_POWERDN;
 
 		break;
 
@@ -87,9 +89,11 @@
 					     const plat_local_state_t *states,
 					     unsigned int ncpu)
 {
-	plat_local_state_t target = *states;
+	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
 	int cpu = plat_my_core_pos();
 	int core_pos = read_mpidr() & MPIDR_CPU_MASK;
+	uint32_t bpmp_reply, data[3];
+	int ret;
 
 	/* get the power state at this level */
 	if (lvl == MPIDR_AFFLVL1)
@@ -97,19 +101,57 @@
 	if (lvl == MPIDR_AFFLVL2)
 		target = *(states + cpu);
 
-	/* Cluster idle/power-down */
-	if ((lvl == MPIDR_AFFLVL1) && ((target == PSTATE_ID_CLUSTER_IDLE) ||
-	    (target == PSTATE_ID_CLUSTER_POWERDN))) {
-		return target;
-	}
+	if ((lvl == MPIDR_AFFLVL1) && (target == PSTATE_ID_CLUSTER_IDLE)) {
+
+		/* initialize the bpmp interface */
+		(void)tegra_bpmp_init();
+
+		/* Cluster idle */
+		data[0] = (uint32_t)cpu;
+		data[1] = TEGRA_PM_CC6;
+		data[2] = TEGRA_PM_SC1;
+		ret = tegra_bpmp_send_receive_atomic(MRQ_DO_IDLE,
+				(void *)&data, (int)sizeof(data),
+				(void *)&bpmp_reply, (int)sizeof(bpmp_reply));
 
-	/* System Suspend */
-	if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
-	    (target == PSTATE_ID_SOC_POWERDN))
-		return PSTATE_ID_SOC_POWERDN;
+		/* check if cluster idle entry is allowed */
+		if ((ret != 0L) || (bpmp_reply != BPMP_CCx_ALLOWED)) {
 
-	/* default state */
-	return PSCI_LOCAL_STATE_RUN;
+			/* Cluster idle not allowed */
+			target = PSCI_LOCAL_STATE_RUN;
+		}
+
+	} else if ((lvl == MPIDR_AFFLVL1) && (target == PSTATE_ID_CLUSTER_POWERDN)) {
+
+		/* initialize the bpmp interface */
+		(void)tegra_bpmp_init();
+
+		/* Cluster power-down */
+		data[0] = (uint32_t)cpu;
+		data[1] = TEGRA_PM_CC7;
+		data[2] = TEGRA_PM_SC1;
+		ret = tegra_bpmp_send_receive_atomic(MRQ_DO_IDLE,
+				(void *)&data, (int)sizeof(data),
+				(void *)&bpmp_reply, (int)sizeof(bpmp_reply));
+
+		/* check if cluster power down is allowed */
+		if ((ret != 0L) || (bpmp_reply != BPMP_CCx_ALLOWED)) {
+
+			/* Cluster power down not allowed */
+			target = PSCI_LOCAL_STATE_RUN;
+		}
+
+	} else if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
+	    (target == PSTATE_ID_SOC_POWERDN)) {
+
+		/* System Suspend */
+		target = PSTATE_ID_SOC_POWERDN;
+
+	} else {
+		; /* do nothing */
+	}
+
+	return target;
 }
 
 int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
@@ -120,27 +162,43 @@
 	unsigned int stateid_afflvl2 = pwr_domain_state[MPIDR_AFFLVL2];
 	unsigned int stateid_afflvl1 = pwr_domain_state[MPIDR_AFFLVL1];
 	unsigned int stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0];
+	int ret = PSCI_E_SUCCESS;
 
 	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
 
 		assert((stateid_afflvl0 == PLAT_MAX_OFF_STATE) ||
-		       (stateid_afflvl0 == PSTATE_ID_SOC_POWERDN));
+			(stateid_afflvl0 == PSTATE_ID_SOC_POWERDN));
 		assert((stateid_afflvl1 == PLAT_MAX_OFF_STATE) ||
-		       (stateid_afflvl1 == PSTATE_ID_SOC_POWERDN));
+			(stateid_afflvl1 == PSTATE_ID_SOC_POWERDN));
 
-		/* suspend the entire soc */
-		tegra_fc_soc_powerdn(mpidr);
+		if (tegra_chipid_is_t210_b01()) {
+
+			/* Suspend se/se2 and pka1 */
+			if (tegra_se_suspend() != 0) {
+				ret = PSCI_E_INTERN_FAIL;
+			}
+
+			/* Save tzram contents */
+			if (tegra_se_save_tzram() != 0) {
+				ret = PSCI_E_INTERN_FAIL;
+			}
+		}
+
+		/* enter system suspend */
+		if (ret == PSCI_E_SUCCESS) {
+			tegra_fc_soc_powerdn(mpidr);
+		}
 
 	} else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_IDLE) {
 
-		assert(stateid_afflvl0 == PSTATE_ID_CLUSTER_IDLE);
+		assert(stateid_afflvl0 == PSTATE_ID_CORE_POWERDN);
 
 		/* Prepare for cluster idle */
 		tegra_fc_cluster_idle(mpidr);
 
 	} else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_POWERDN) {
 
-		assert(stateid_afflvl0 == PSTATE_ID_CLUSTER_POWERDN);
+		assert(stateid_afflvl0 == PSTATE_ID_CORE_POWERDN);
 
 		/* Prepare for cluster powerdn */
 		tegra_fc_cluster_powerdn(mpidr);
@@ -151,17 +209,27 @@
 		tegra_fc_cpu_powerdn(mpidr);
 
 	} else {
-		ERROR("%s: Unknown state id\n", __func__);
-		return PSCI_E_NOT_SUPPORTED;
+		ERROR("%s: Unknown state id (%d, %d, %d)\n", __func__,
+			stateid_afflvl2, stateid_afflvl1, stateid_afflvl0);
+		ret = PSCI_E_NOT_SUPPORTED;
 	}
 
-	return PSCI_E_SUCCESS;
+	return ret;
 }
 
 int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
 {
+	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
 	uint32_t val;
 
+	/* platform parameter passed by the previous bootloader */
+	if (plat_params->l2_ecc_parity_prot_dis != 1) {
+		/* Enable ECC Parity Protection for Cortex-A57 CPUs */
+		val = read_l2ctlr_el1();
+		val |= (uint64_t)CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
+		write_l2ctlr_el1(val);
+	}
+
 	/*
 	 * Check if we are exiting from SOC_POWERDN.
 	 */
@@ -169,6 +237,13 @@
 			PLAT_SYS_SUSPEND_STATE_ID) {
 
 		/*
+		 * Security engine resume
+		 */
+		if (tegra_chipid_is_t210_b01()) {
+			tegra_se_resume();
+		}
+
+		/*
 		 * Lock scratch registers which hold the CPU vectors
 		 */
 		tegra_pmc_lock_cpu_vectors();
@@ -231,7 +306,7 @@
 	 * for the PMC APB clock would not be changed due to system reset.
 	 */
 	mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_BURST_POLICY,
-		       SCLK_BURST_POLICY_DEFAULT);
+		SCLK_BURST_POLICY_DEFAULT);
 	mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_RATE, 0);
 
 	/* Wait 1 ms to make sure clock source/device logic is stabilized. */
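
Note: the cluster-idle and cluster power-down branches in tegra_soc_get_target_pwr_state() above perform the same BPMP round trip with a three-word payload {cpu, CCx state, TEGRA_PM_SC1} and differ only in the CCx value. A sketch of that query as a single predicate is shown below; the helper name is an assumption for the example, and it presumes tegra_bpmp_init() has already been called, as in the handler.

#include <stdbool.h>

#include <bpmp.h>

/*
 * Sketch only: the MRQ_DO_IDLE query used by the cluster idle (TEGRA_PM_CC6)
 * and cluster power-down (TEGRA_PM_CC7) paths above.
 */
static bool t210_bpmp_allows_cluster_state(uint32_t cpu, uint32_t cc_state)
{
	uint32_t bpmp_reply = 0U;
	uint32_t data[3] = { cpu, cc_state, TEGRA_PM_SC1 };
	int ret;

	ret = tegra_bpmp_send_receive_atomic(MRQ_DO_IDLE,
			(void *)&data, (int)sizeof(data),
			(void *)&bpmp_reply, (int)sizeof(bpmp_reply));

	/* any error or a reply other than BPMP_CCx_ALLOWED vetoes the state */
	return (ret == 0) && (bpmp_reply == BPMP_CCx_ALLOWED);
}
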
diff --git a/plat/nvidia/tegra/soc/t210/plat_setup.c b/plat/nvidia/tegra/soc/t210/plat_setup.c
index c7f7165..6246dde 100644
--- a/plat/nvidia/tegra/soc/t210/plat_setup.c
+++ b/plat/nvidia/tegra/soc/t210/plat_setup.c
@@ -1,34 +1,21 @@
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch_helpers.h>
+#include <bpmp.h>
+#include <cortex_a57.h>
 #include <common/bl_common.h>
 #include <drivers/console.h>
 #include <lib/xlat_tables/xlat_tables_v2.h>
-
+#include <platform.h>
+#include <security_engine.h>
 #include <tegra_def.h>
+#include <tegra_platform.h>
 #include <tegra_private.h>
 
-/*******************************************************************************
- * The Tegra power domain tree has a single system level power domain i.e. a
- * single root node. The first entry in the power domain descriptor specifies
- * the number of power domains at the highest power level.
- *******************************************************************************
- */
-const unsigned char tegra_power_domain_tree_desc[] = {
-	/* No of root nodes */
-	1,
-	/* No of clusters */
-	PLATFORM_CLUSTER_COUNT,
-	/* No of CPU cores - cluster0 */
-	PLATFORM_MAX_CPUS_PER_CLUSTER,
-	/* No of CPU cores - cluster1 */
-	PLATFORM_MAX_CPUS_PER_CLUSTER
-};
-
 /* sets of MMIO ranges setup */
 #define MMIO_RANGE_0_ADDR	0x50000000
 #define MMIO_RANGE_1_ADDR	0x60000000
@@ -39,6 +26,8 @@
  * Table of regions to map using the MMU.
  */
 static const mmap_region_t tegra_mmap[] = {
+	MAP_REGION_FLAT(TEGRA_IRAM_BASE, 0x40000, /* 256KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
 	MAP_REGION_FLAT(MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE,
 			MT_DEVICE | MT_RW | MT_SECURE),
 	MAP_REGION_FLAT(MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE,
@@ -53,11 +42,44 @@
  ******************************************************************************/
 const mmap_region_t *plat_get_mmio_map(void)
 {
+	/* Add the map region for security engine SE2 */
+	if (tegra_chipid_is_t210_b01()) {
+		mmap_add_region((uint64_t)TEGRA_SE2_BASE,
+				(uint64_t)TEGRA_SE2_BASE,
+				(uint64_t)TEGRA_SE2_RANGE_SIZE,
+				MT_DEVICE | MT_RW | MT_SECURE);
+	}
+
 	/* MMIO space */
 	return tegra_mmap;
 }
 
 /*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ *******************************************************************************
+ */
+const unsigned char tegra_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	1,
+	/* No of clusters */
+	PLATFORM_CLUSTER_COUNT,
+	/* No of CPU cores - cluster0 */
+	PLATFORM_MAX_CPUS_PER_CLUSTER,
+	/* No of CPU cores - cluster1 */
+	PLATFORM_MAX_CPUS_PER_CLUSTER
+};
+
+/*******************************************************************************
+ * This function returns the Tegra default topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return tegra_power_domain_tree_desc;
+}
+
+/*******************************************************************************
  * Handler to get the System Counter Frequency
  ******************************************************************************/
 unsigned int plat_get_syscnt_freq2(void)
@@ -94,6 +116,28 @@
 }
 
 /*******************************************************************************
+ * Handler for early platform setup
+ ******************************************************************************/
+void plat_early_platform_setup(void)
+{
+	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+	uint64_t val;
+
+	/* platform parameter passed by the previous bootloader */
+	if (plat_params->l2_ecc_parity_prot_dis != 1) {
+		/* Enable ECC Parity Protection for Cortex-A57 CPUs */
+		val = read_l2ctlr_el1();
+		val |= (uint64_t)CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
+		write_l2ctlr_el1(val);
+	}
+
+	/* Initialize security engine driver */
+	if (tegra_chipid_is_t210_b01()) {
+		tegra_se_init();
+	}
+}
+
+/*******************************************************************************
  * Initialize the GIC and SGIs
  ******************************************************************************/
 void plat_gic_setup(void)
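
Note: the L2 ECC/parity enable sequence gated on l2_ecc_parity_prot_dis now appears both in plat_early_platform_setup() above and in tegra_soc_pwr_domain_on_finish() in plat_psci_handlers.c. A sketch of a shared helper that both call sites could use is shown below; the helper name is illustrative and not part of the port.

#include <arch_helpers.h>
#include <cortex_a57.h>

#include <tegra_private.h>

/*
 * Sketch only: enable L2 ECC/parity protection on Cortex-A57 unless the
 * previous bootloader asked for it to stay disabled.
 */
static void t210_enable_l2_ecc_parity(void)
{
	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
	uint64_t val;

	if (plat_params->l2_ecc_parity_prot_dis == 1) {
		return;
	}

	val = read_l2ctlr_el1();
	val |= (uint64_t)CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
	write_l2ctlr_el1(val);
}
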
diff --git a/plat/nvidia/tegra/soc/t210/platform_t210.mk b/plat/nvidia/tegra/soc/t210/platform_t210.mk
index b0a474c..a9ab0d8 100644
--- a/plat/nvidia/tegra/soc/t210/platform_t210.mk
+++ b/plat/nvidia/tegra/soc/t210/platform_t210.mk
@@ -16,18 +16,22 @@
 PLATFORM_MAX_CPUS_PER_CLUSTER		:= 4
 $(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
 
-MAX_XLAT_TABLES				:= 4
+MAX_XLAT_TABLES				:= 10
 $(eval $(call add_define,MAX_XLAT_TABLES))
 
-MAX_MMAP_REGIONS			:= 8
+MAX_MMAP_REGIONS			:= 10
 $(eval $(call add_define,MAX_MMAP_REGIONS))
 
+PLAT_INCLUDES		+=	-I${SOC_DIR}/drivers/se
+
 BL31_SOURCES		+=	lib/cpus/aarch64/cortex_a53.S			\
 				lib/cpus/aarch64/cortex_a57.S			\
+				${COMMON_DIR}/drivers/bpmp/bpmp.c	\
 				${COMMON_DIR}/drivers/flowctrl/flowctrl.c	\
 				${COMMON_DIR}/drivers/memctrl/memctrl_v1.c	\
 				${SOC_DIR}/plat_psci_handlers.c			\
 				${SOC_DIR}/plat_setup.c				\
+				${SOC_DIR}/drivers/se/security_engine.c		\
 				${SOC_DIR}/plat_secondary.c
 
 # Enable workarounds for selected Cortex-A57 errata.
diff --git a/plat/socionext/synquacer/include/plat.ld.S b/plat/socionext/synquacer/include/plat.ld.S
new file mode 100644
index 0000000..1b7f699
--- /dev/null
+++ b/plat/socionext/synquacer/include/plat.ld.S
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SYNQUACER_PLAT_LD_S__
+#define SYNQUACER_PLAT_LD_S__
+
+#include <xlat_tables_defs.h>
+
+#define SPM_SHIM_EXCEPTIONS_VMA		SP_DRAM
+
+MEMORY {
+	SP_DRAM (rw): ORIGIN = PLAT_SQ_SP_PRIV_BASE, LENGTH = PLAT_SQ_SP_PRIV_SIZE
+}
+
+SECTIONS
+{
+	/*
+	 * Put the page tables in secure DRAM so that the PTW can make cacheable
+	 * accesses, as the core SPM code expects. (The SRAM on SynQuacer does
+	 * not support inner shareable WBWA mappings, so it is mapped as
+	 * normal non-cacheable.)
+	 */
+	sp_xlat_table (NOLOAD) : ALIGN(PAGE_SIZE) {
+		*(sp_xlat_table)
+		*(.bss.sp_base_xlat_table)
+	} >SP_DRAM
+}
+
+#endif /* SYNQUACER_PLAT_LD_S__ */
diff --git a/plat/socionext/synquacer/include/platform_def.h b/plat/socionext/synquacer/include/platform_def.h
index de6be7d..0cec81b 100644
--- a/plat/socionext/synquacer/include/platform_def.h
+++ b/plat/socionext/synquacer/include/platform_def.h
@@ -29,8 +29,8 @@
 
 #define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 32)
 #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 32)
-#define MAX_XLAT_TABLES			4
-#define MAX_MMAP_REGIONS		6
+#define MAX_XLAT_TABLES			8
+#define MAX_MMAP_REGIONS		8
 
 #define PLATFORM_STACK_SIZE		0x400
 
@@ -38,6 +38,10 @@
 #define BL31_SIZE			0x00080000
 #define BL31_LIMIT			(BL31_BASE + BL31_SIZE)
 
+#define BL32_BASE			0xfc000000
+#define BL32_SIZE			0x03c00000
+#define BL32_LIMIT			(BL32_BASE + BL32_SIZE)
+
 #define PLAT_SQ_CCN_BASE		0x32000000
 #define PLAT_SQ_CLUSTER_TO_CCN_ID_MAP					\
 					0,	/* Cluster 0 */		\
@@ -79,4 +83,77 @@
 
 #define PLAT_SQ_GPIO_BASE		0x51000000
 
+#define PLAT_SPM_BUF_BASE		(BL32_LIMIT - 32 * PLAT_SPM_BUF_SIZE)
+#define PLAT_SPM_BUF_SIZE		ULL(0x10000)
+#define PLAT_SPM_SPM_BUF_EL0_MMAP	MAP_REGION2(PLAT_SPM_BUF_BASE, \
+						    PLAT_SPM_BUF_BASE, \
+						    PLAT_SPM_BUF_SIZE, \
+						    MT_RO_DATA | MT_SECURE | \
+						    MT_USER, PAGE_SIZE)
+
+#define PLAT_SP_IMAGE_NS_BUF_BASE	BL32_LIMIT
+#define PLAT_SP_IMAGE_NS_BUF_SIZE	ULL(0x200000)
+#define PLAT_SP_IMAGE_NS_BUF_MMAP	MAP_REGION2(PLAT_SP_IMAGE_NS_BUF_BASE, \
+						    PLAT_SP_IMAGE_NS_BUF_BASE, \
+						    PLAT_SP_IMAGE_NS_BUF_SIZE, \
+						    MT_RW_DATA | MT_NS | \
+						    MT_USER, PAGE_SIZE)
+
+#define PLAT_SP_IMAGE_STACK_PCPU_SIZE	ULL(0x10000)
+#define PLAT_SP_IMAGE_STACK_SIZE	(32 * PLAT_SP_IMAGE_STACK_PCPU_SIZE)
+#define PLAT_SP_IMAGE_STACK_BASE	(PLAT_SQ_SP_HEAP_BASE + PLAT_SQ_SP_HEAP_SIZE)
+
+#define PLAT_SQ_SP_IMAGE_SIZE		ULL(0x200000)
+#define PLAT_SQ_SP_IMAGE_MMAP		MAP_REGION2(BL32_BASE, BL32_BASE, \
+						    PLAT_SQ_SP_IMAGE_SIZE, \
+						    MT_CODE | MT_SECURE | \
+						    MT_USER, PAGE_SIZE)
+
+#define PLAT_SQ_SP_HEAP_BASE		(BL32_BASE + PLAT_SQ_SP_IMAGE_SIZE)
+#define PLAT_SQ_SP_HEAP_SIZE		ULL(0x800000)
+
+#define PLAT_SQ_SP_IMAGE_RW_MMAP	MAP_REGION2(PLAT_SQ_SP_HEAP_BASE, \
+						    PLAT_SQ_SP_HEAP_BASE, \
+						    (PLAT_SQ_SP_HEAP_SIZE + \
+						     PLAT_SP_IMAGE_STACK_SIZE), \
+						    MT_RW_DATA | MT_SECURE | \
+						    MT_USER, PAGE_SIZE)
+
+#define PLAT_SQ_SP_PRIV_BASE		(PLAT_SP_IMAGE_STACK_BASE + \
+					 PLAT_SP_IMAGE_STACK_SIZE)
+#define PLAT_SQ_SP_PRIV_SIZE		ULL(0x40000)
+
+#define PLAT_SP_PRI			0x20
+#define PLAT_PRI_BITS			2
+#define PLAT_SPM_COOKIE_0		ULL(0)
+#define PLAT_SPM_COOKIE_1		ULL(0)
+
+/* Total number of memory regions with distinct properties */
+#define PLAT_SP_IMAGE_NUM_MEM_REGIONS	6
+
+#define PLAT_SP_IMAGE_MMAP_REGIONS	30
+#define PLAT_SP_IMAGE_MAX_XLAT_TABLES	20
+#define PLAT_SP_IMAGE_XLAT_SECTION_NAME	"sp_xlat_table"
+
+#define PLAT_SQ_UART1_BASE		PLAT_SQ_BOOT_UART_BASE
+#define PLAT_SQ_UART1_SIZE		ULL(0x1000)
+#define PLAT_SQ_UART1_MMAP		MAP_REGION_FLAT(PLAT_SQ_UART1_BASE, \
+							PLAT_SQ_UART1_SIZE, \
+							MT_DEVICE | MT_RW | \
+							MT_NS | MT_PRIVILEGED)
+
+#define PLAT_SQ_PERIPH_BASE		0x50000000
+#define PLAT_SQ_PERIPH_SIZE		ULL(0x8000000)
+#define PLAT_SQ_PERIPH_MMAP		MAP_REGION_FLAT(PLAT_SQ_PERIPH_BASE, \
+							PLAT_SQ_PERIPH_SIZE, \
+							MT_DEVICE | MT_RW | \
+							MT_NS | MT_USER)
+
+#define PLAT_SQ_FLASH_BASE		0x08000000
+#define PLAT_SQ_FLASH_SIZE		ULL(0x8000000)
+#define PLAT_SQ_FLASH_MMAP		MAP_REGION_FLAT(PLAT_SQ_FLASH_BASE, \
+							PLAT_SQ_FLASH_SIZE, \
+							MT_DEVICE | MT_RW | \
+							MT_NS | MT_USER)
+
 #endif /* PLATFORM_DEF_H */
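
Note: the secure-partition layout defined above is easier to review with the addresses expanded. The summary below is derived directly from the macro values in this file (2 MB image, 8 MB heap, 32 x 64 KB stacks, 256 KB private region, 32 x 64 KB shared buffers, 2 MB non-secure buffer).

/*
 * 0xfc000000  BL32_BASE / SP image base   (PLAT_SQ_SP_IMAGE_SIZE    = 0x200000)
 * 0xfc200000  PLAT_SQ_SP_HEAP_BASE        (PLAT_SQ_SP_HEAP_SIZE     = 0x800000)
 * 0xfca00000  PLAT_SP_IMAGE_STACK_BASE    (PLAT_SP_IMAGE_STACK_SIZE = 0x200000)
 * 0xfcc00000  PLAT_SQ_SP_PRIV_BASE        (PLAT_SQ_SP_PRIV_SIZE     = 0x40000)
 * 0xffa00000  PLAT_SPM_BUF_BASE           (32 * PLAT_SPM_BUF_SIZE   = 0x200000)
 * 0xffc00000  BL32_LIMIT = PLAT_SP_IMAGE_NS_BUF_BASE (0x200000, non-secure)
 */
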
diff --git a/plat/socionext/synquacer/platform.mk b/plat/socionext/synquacer/platform.mk
index 1bee20a..53c39a0 100644
--- a/plat/socionext/synquacer/platform.mk
+++ b/plat/socionext/synquacer/platform.mk
@@ -17,10 +17,6 @@
 # Libraries
 include lib/xlat_tables_v2/xlat_tables.mk
 
-ifeq (${SPD},opteed)
-TF_CFLAGS_aarch64	+=	-DBL32_BASE=0xfc000000
-endif
-
 PLAT_PATH		:=	plat/socionext/synquacer
 PLAT_INCLUDES		:=	-I$(PLAT_PATH)/include		\
 				-I$(PLAT_PATH)/drivers/scpi	\
@@ -47,3 +43,9 @@
 				$(PLAT_PATH)/sq_xlat_setup.c		\
 				$(PLAT_PATH)/drivers/scpi/sq_scpi.c	\
 				$(PLAT_PATH)/drivers/mhu/sq_mhu.c
+
+ifeq (${ENABLE_SPM},1)
+$(eval $(call add_define,PLAT_EXTRA_LD_SCRIPT))
+
+BL31_SOURCES		+=	$(PLAT_PATH)/sq_spm.c
+endif
diff --git a/plat/socionext/synquacer/sq_bl31_setup.c b/plat/socionext/synquacer/sq_bl31_setup.c
index f8d2526..2fac80f 100644
--- a/plat/socionext/synquacer/sq_bl31_setup.c
+++ b/plat/socionext/synquacer/sq_bl31_setup.c
@@ -21,6 +21,10 @@
 static entry_point_info_t bl32_image_ep_info;
 static entry_point_info_t bl33_image_ep_info;
 
+IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_START__, SPM_SHIM_EXCEPTIONS_START);
+IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_END__,   SPM_SHIM_EXCEPTIONS_END);
+IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_LMA__,   SPM_SHIM_EXCEPTIONS_LMA);
+
 entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
 {
 	assert(sec_state_is_valid(type));
@@ -76,7 +80,7 @@
 	/* Initialize power controller before setting up topology */
 	plat_sq_pwrc_setup();
 
-#ifdef BL32_BASE
+#ifdef SPD_opteed
 	struct draminfo di = {0};
 
 	scpi_get_draminfo(&di);
@@ -98,7 +102,7 @@
 	} else {
 		NOTICE("OP-TEE has not been loaded by SCP firmware\n");
 	}
-#endif /* BL32_BASE */
+#endif /* SPD_opteed */
 
 	/* Populate entry point information for BL33 */
 	SET_PARAM_HEAD(&bl33_image_ep_info,
@@ -155,8 +159,27 @@
 
 void bl31_plat_arch_setup(void)
 {
-	sq_mmap_setup(BL31_BASE, BL31_SIZE, NULL);
+	static const mmap_region_t secure_partition_mmap[] = {
+#if ENABLE_SPM && SPM_DEPRECATED
+		MAP_REGION_FLAT(PLAT_SPM_BUF_BASE,
+				PLAT_SPM_BUF_SIZE,
+				MT_RW_DATA | MT_SECURE),
+		MAP_REGION_FLAT(PLAT_SQ_SP_PRIV_BASE,
+				PLAT_SQ_SP_PRIV_SIZE,
+				MT_RW_DATA | MT_SECURE),
+#endif
+		{0},
+	};
+
+	sq_mmap_setup(BL31_BASE, BL31_SIZE, secure_partition_mmap);
 	enable_mmu_el3(XLAT_TABLE_NC);
+
+#if ENABLE_SPM && SPM_DEPRECATED
+	memcpy((void *)SPM_SHIM_EXCEPTIONS_START,
+	       (void *)SPM_SHIM_EXCEPTIONS_LMA,
+	       (uintptr_t)SPM_SHIM_EXCEPTIONS_END -
+	       (uintptr_t)SPM_SHIM_EXCEPTIONS_START);
+#endif
 }
 
 void bl31_plat_enable_mmu(uint32_t flags)
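
Note: bl31_plat_arch_setup() above copies the SPM shim vectors from their load address inside the BL31 image (__SPM_SHIM_EXCEPTIONS_LMA__) to their run address in SP_DRAM, matching the SPM_SHIM_EXCEPTIONS_VMA override in plat.ld.S. The same copy is sketched below as a standalone helper with a size sanity check; the bound against PLAT_SQ_SP_PRIV_SIZE and the helper name are assumptions for the example.

#include <assert.h>
#include <string.h>

/*
 * Sketch only: relocate the shim exception vectors from their LMA (inside
 * BL31) to their VMA (SP_DRAM).
 */
static void sq_copy_spm_shim_exceptions(void)
{
	size_t size = SPM_SHIM_EXCEPTIONS_END - SPM_SHIM_EXCEPTIONS_START;

	/* the run-time copy must fit in the SP_DRAM private region */
	assert(size <= PLAT_SQ_SP_PRIV_SIZE);

	memcpy((void *)SPM_SHIM_EXCEPTIONS_START,
	       (void *)SPM_SHIM_EXCEPTIONS_LMA, size);
}
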
diff --git a/plat/socionext/synquacer/sq_spm.c b/plat/socionext/synquacer/sq_spm.c
new file mode 100644
index 0000000..01cce17
--- /dev/null
+++ b/plat/socionext/synquacer/sq_spm.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <platform_def.h>
+
+#include <bl31/ehf.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <services/secure_partition.h>
+
+static const mmap_region_t plat_arm_secure_partition_mmap[] = {
+	PLAT_SQ_FLASH_MMAP,
+	PLAT_SQ_UART1_MMAP,
+	PLAT_SQ_PERIPH_MMAP,
+	PLAT_SQ_SP_IMAGE_MMAP,
+	PLAT_SP_IMAGE_NS_BUF_MMAP,
+	PLAT_SQ_SP_IMAGE_RW_MMAP,
+	PLAT_SPM_SPM_BUF_EL0_MMAP,
+	{0}
+};
+
+/*
+ * Boot information passed to a secure partition during initialisation. Linear
+ * indices in MP information will be filled at runtime.
+ */
+static secure_partition_mp_info_t sp_mp_info[] = {
+	{0x80000000, 0}, {0x80000001, 0}, {0x80000100, 0}, {0x80000101, 0},
+	{0x80000200, 0}, {0x80000201, 0}, {0x80000300, 0}, {0x80000301, 0},
+	{0x80000400, 0}, {0x80000401, 0}, {0x80000500, 0}, {0x80000501, 0},
+	{0x80000600, 0}, {0x80000601, 0}, {0x80000700, 0}, {0x80000701, 0},
+	{0x80000800, 0}, {0x80000801, 0}, {0x80000900, 0}, {0x80000901, 0},
+	{0x80000a00, 0}, {0x80000a01, 0}, {0x80000b00, 0}, {0x80000b01, 0},
+};
+
+const secure_partition_boot_info_t plat_arm_secure_partition_boot_info = {
+	.h.type			= PARAM_SP_IMAGE_BOOT_INFO,
+	.h.version		= VERSION_1,
+	.h.size			= sizeof(secure_partition_boot_info_t),
+	.h.attr			= 0,
+	.sp_mem_base		= BL32_BASE,
+	.sp_mem_limit		= BL32_LIMIT,
+	.sp_image_base		= BL32_BASE,
+	.sp_stack_base		= PLAT_SP_IMAGE_STACK_BASE,
+	.sp_heap_base		= PLAT_SQ_SP_HEAP_BASE,
+	.sp_ns_comm_buf_base	= PLAT_SP_IMAGE_NS_BUF_BASE,
+	.sp_shared_buf_base	= PLAT_SPM_BUF_BASE,
+	.sp_image_size		= PLAT_SQ_SP_IMAGE_SIZE,
+	.sp_pcpu_stack_size	= PLAT_SP_IMAGE_STACK_PCPU_SIZE,
+	.sp_heap_size		= PLAT_SQ_SP_HEAP_SIZE,
+	.sp_ns_comm_buf_size	= PLAT_SP_IMAGE_NS_BUF_SIZE,
+	.sp_shared_buf_size	= PLAT_SPM_BUF_SIZE,
+	.num_sp_mem_regions	= PLAT_SP_IMAGE_NUM_MEM_REGIONS,
+	.num_cpus		= PLATFORM_CORE_COUNT,
+	.mp_info		= sp_mp_info,
+};
+
+const struct mmap_region *plat_get_secure_partition_mmap(void *cookie)
+{
+	return plat_arm_secure_partition_mmap;
+}
+
+const struct secure_partition_boot_info *plat_get_secure_partition_boot_info(
+		void *cookie)
+{
+	return &plat_arm_secure_partition_boot_info;
+}
+
+static ehf_pri_desc_t sq_exceptions[] = {
+	EHF_PRI_DESC(PLAT_PRI_BITS, PLAT_SP_PRI),
+};
+EHF_REGISTER_PRIORITIES(sq_exceptions, ARRAY_SIZE(sq_exceptions), PLAT_PRI_BITS);
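
Note: the 24 entries in sp_mp_info[] above follow a regular MPIDR encoding: bit 31 set, Aff1 = cluster (0x0-0xb), Aff0 = core (0-1), i.e. twelve two-core clusters. A small encoder that reproduces the table values is shown below; the helper name is an assumption for the example.

#include <stdint.h>

/* Sketch only: reproduce the MPIDR values listed in sp_mp_info[] above. */
static uint64_t sq_sp_mpidr(unsigned int cluster, unsigned int core)
{
	return 0x80000000ULL | ((uint64_t)cluster << 8) | core;
}
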
diff --git a/services/std_svc/spm_deprecated/spm.mk b/services/std_svc/spm_deprecated/spm.mk
index ed36812..3503020 100644
--- a/services/std_svc/spm_deprecated/spm.mk
+++ b/services/std_svc/spm_deprecated/spm.mk
@@ -21,3 +21,6 @@
 
 # Let the top-level Makefile know that we intend to include a BL32 image
 NEED_BL32		:=	yes
+
+# Required so that SPM code executing at S-EL0 can access the timer registers
+NS_TIMER_SWITCH		:=	1
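
Note: together with the CNTKCTL_EL1 setting added later in this patch (spm_setup.c), this lets code running in the S-EL0 partition read the generic timer registers directly rather than trapping. A minimal illustration is below; the inline-asm accessor is purely an example, not part of the SPM sources.

#include <stdint.h>

/* Sketch only: read the virtual counter from S-EL0 once EL0 access is enabled. */
static inline uint64_t sp_read_cntvct(void)
{
	uint64_t cnt;

	__asm__ volatile("mrs %0, cntvct_el0" : "=r" (cnt));
	return cnt;
}
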
diff --git a/services/std_svc/spm_deprecated/spm_main.c b/services/std_svc/spm_deprecated/spm_main.c
index 540f257..7525763 100644
--- a/services/std_svc/spm_deprecated/spm_main.c
+++ b/services/std_svc/spm_deprecated/spm_main.c
@@ -151,7 +151,7 @@
 
 	INFO("Secure Partition initialized.\n");
 
-	return rc;
+	return !rc;
 }
 
 /*******************************************************************************
diff --git a/services/std_svc/spm_deprecated/spm_setup.c b/services/std_svc/spm_deprecated/spm_setup.c
index d458f4a..aae6cd5 100644
--- a/services/std_svc/spm_deprecated/spm_setup.c
+++ b/services/std_svc/spm_deprecated/spm_setup.c
@@ -84,10 +84,10 @@
 	unsigned int max_granule_mask = max_granule - 1U;
 
 	/* Base must be aligned to the max granularity */
-	assert((ARM_SP_IMAGE_NS_BUF_BASE & max_granule_mask) == 0);
+	assert((PLAT_SP_IMAGE_NS_BUF_BASE & max_granule_mask) == 0);
 
 	/* Size must be a multiple of the max granularity */
-	assert((ARM_SP_IMAGE_NS_BUF_SIZE & max_granule_mask) == 0);
+	assert((PLAT_SP_IMAGE_NS_BUF_SIZE & max_granule_mask) == 0);
 
 #endif /* ENABLE_ASSERTIONS */
 
@@ -144,8 +144,6 @@
 		SCTLR_SA0_BIT							|
 		/* Allow cacheable data and instr. accesses to normal memory. */
 		SCTLR_C_BIT | SCTLR_I_BIT					|
-		/* Alignment fault checking enabled when at EL1 and EL0. */
-		SCTLR_A_BIT							|
 		/* Enable MMU. */
 		SCTLR_M_BIT
 	;
@@ -153,6 +151,11 @@
 	sctlr_el1 &= ~(
 		/* Explicit data accesses at EL0 are little-endian. */
 		SCTLR_E0E_BIT							|
+		/*
+		 * Alignment fault checking disabled when at EL1 and EL0 as
+		 * the UEFI spec permits unaligned accesses.
+		 */
+		SCTLR_A_BIT							|
 		/* Accesses to DAIF from EL0 are trapped to EL1. */
 		SCTLR_UMA_BIT
 	);
@@ -168,6 +171,9 @@
 	write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
 			SPM_SHIM_EXCEPTIONS_PTR);
 
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
+		      EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);
+
 	/*
 	 * FPEN: Allow the Secure Partition to access FP/SIMD registers.
 	 * Note that SPM will not do any saving/restoring of these registers on