Tegra186: mce: driver for the CPU complex power manager block

The CPU Complex (CCPLEX) Power Manager (Denver MCE, or DMCE) is an
offload engine for the BPMP that performs voltage-related sequencing
and services hardware requests with lower latency than the BPMP
firmware.

There are two interfaces to the MCEs - Abstract Request Interface (ARI)
and the traditional NVGINDEX/NVGDATA interface.
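
As a rough sketch, the two request paths implemented by this driver
look as follows (register and helper names are the ones introduced
below; error handling and the event-mask path are omitted):

    /* ARI: per-CPU MMIO mailbox - program the data, issue the request,
       then poll ARI_STATUS until the request completes */
    mmio_write_32(ari_base + ARI_REQUEST_DATA_LO, lo);
    mmio_write_32(ari_base + ARI_REQUEST_DATA_HI, hi);
    mmio_write_32(ari_base + ARI_REQUEST, req | ARI_REQUEST_VALID_BIT);
    while (mmio_read_32(ari_base + ARI_STATUS) &
           (ARI_REQ_ONGOING | ARI_REQ_PENDING))
            ;

    /* NVG: Denver system register pair (see nvg_helpers.S) - write the
       request index and data, then read the result back */
    nvg_set_request_data(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
    allowed = !!nvg_get_result();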

The MCE supports various commands that the CPUs - ARM as well as
Denver - can use for power management and reset functionality. Since
the Linux kernel is the master for all these scenarios, each MCE
command can be issued via a corresponding SMC. These SMCs have been
placed in the SiP SMC space as they are specific to the Tegra186 SoC.
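
Illustrative only - a non-secure (kernel-side) wrapper for one such SMC
could look roughly like the sketch below. The SiP function ID is a
placeholder (not defined by this patch), and the argument layout assumes
the command arguments are passed in x1/x2, mirroring
mce_command_handler():

    #include <linux/arm-smccc.h>
    #include <linux/types.h>

    /* Placeholder SiP function ID - the real value is not part of this change */
    #define TEGRA_SIP_MCE_ENTER_CSTATE  0x82FFFF00

    static int mce_enter_cstate(u32 state, u32 wake_time)
    {
            struct arm_smccc_res res;

            /* state and wake time travel in x1/x2; the result returns in x0 */
            arm_smccc_smc(TEGRA_SIP_MCE_ENTER_CSTATE, state, wake_time,
                          0, 0, 0, 0, 0, &res);
            return (int)res.a0;
    }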

Change-Id: I67bee83d2289a8ab63bc5556e5744e5043803e51
Signed-off-by: Varun Wadekar <vwadekar@nvidia.com>

diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S b/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S
new file mode 100644
index 0000000..e1398cc
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.globl	nvg_set_request_data
+	.globl	nvg_set_request
+	.globl	nvg_get_result
+
+/* void nvg_set_request_data(uint64_t req, uint64_t data) */
+func nvg_set_request_data
+	msr	s3_0_c15_c1_2, x0
+	msr	s3_0_c15_c1_3, x1
+	isb
+	ret
+endfunc nvg_set_request_data
+
+/* void nvg_set_request(uint64_t req) */
+func nvg_set_request
+	msr	s3_0_c15_c1_2, x0
+	ret
+endfunc nvg_set_request
+
+/* uint64_t nvg_get_result(void) */
+func nvg_get_result
+	mrs	x0, s3_0_c15_c1_3
+	ret
+endfunc nvg_get_result
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
new file mode 100644
index 0000000..147a358
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <denver.h>
+#include <mmio.h>
+#include <mce.h>
+#include <sys/errno.h>
+#include <t18x_ari.h>
+
+/*******************************************************************************
+ * Register offsets for ARI request/results
+ ******************************************************************************/
+#define ARI_REQUEST			0x0
+#define ARI_REQUEST_EVENT_MASK		0x4
+#define ARI_STATUS			0x8
+#define ARI_REQUEST_DATA_LO		0xC
+#define ARI_REQUEST_DATA_HI		0x10
+#define ARI_RESPONSE_DATA_LO		0x14
+#define ARI_RESPONSE_DATA_HI		0x18
+
+/* Status values for the current request */
+#define ARI_REQ_PENDING			1
+#define ARI_REQ_ONGOING			3
+#define ARI_REQUEST_VALID_BIT		(1 << 8)
+#define ARI_EVT_MASK_STANDBYWFI_BIT	(1 << 7)
+
+/*******************************************************************************
+ * ARI helper functions
+ ******************************************************************************/
+static inline uint32_t ari_read_32(uint32_t ari_base, uint32_t reg)
+{
+	return mmio_read_32(ari_base + reg);
+}
+
+static inline void ari_write_32(uint32_t ari_base, uint32_t val, uint32_t reg)
+{
+	mmio_write_32(ari_base + reg, val);
+}
+
+static inline uint32_t ari_get_request_low(uint32_t ari_base)
+{
+	return ari_read_32(ari_base, ARI_REQUEST_DATA_LO);
+}
+
+static inline uint32_t ari_get_request_high(uint32_t ari_base)
+{
+	return ari_read_32(ari_base, ARI_REQUEST_DATA_HI);
+}
+
+static inline uint32_t ari_get_response_low(uint32_t ari_base)
+{
+	return ari_read_32(ari_base, ARI_RESPONSE_DATA_LO);
+}
+
+static inline uint32_t ari_get_response_high(uint32_t ari_base)
+{
+	return ari_read_32(ari_base, ARI_RESPONSE_DATA_HI);
+}
+
+static inline void ari_clobber_response(uint32_t ari_base)
+{
+	ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_LO);
+	ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_HI);
+}
+
+static int ari_request_wait(uint32_t ari_base, uint32_t evt_mask, uint32_t req,
+		uint32_t lo, uint32_t hi)
+{
+	int status;
+
+	/* program the request, event_mask, hi and lo registers */
+	ari_write_32(ari_base, lo, ARI_REQUEST_DATA_LO);
+	ari_write_32(ari_base, hi, ARI_REQUEST_DATA_HI);
+	ari_write_32(ari_base, evt_mask, ARI_REQUEST_EVENT_MASK);
+	ari_write_32(ari_base, req | ARI_REQUEST_VALID_BIT, ARI_REQUEST);
+
+	/*
+	 * For commands that have an event trigger, we should bypass
+	 * ARI_STATUS polling, since MCE is waiting for SW to trigger
+	 * the event.
+	 */
+	if (evt_mask)
+		return 0;
+
+	/* NOTE: add timeout check if needed */
+	status = ari_read_32(ari_base, ARI_STATUS);
+	while (status & (ARI_REQ_ONGOING | ARI_REQ_PENDING))
+		status = ari_read_32(ari_base, ARI_STATUS);
+
+	return 0;
+}
+
+int ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	/* check for allowed power state */
+	if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
+	    state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
+		ERROR("%s: unknown cstate (%d)\n", __func__, state);
+		return EINVAL;
+	}
+
+	/* Enter the cstate, to be woken up after wake_time (TSC ticks) */
+	return ari_request_wait(ari_base, ARI_EVT_MASK_STANDBYWFI_BIT,
+		TEGRA_ARI_ENTER_CSTATE, state, wake_time);
+}
+
+int ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+	uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+	uint8_t update_wake_mask)
+{
+	uint32_t val = 0;
+
+	/* update CLUSTER_CSTATE? */
+	if (cluster)
+		val |= (cluster & CLUSTER_CSTATE_MASK) |
+			CLUSTER_CSTATE_UPDATE_BIT;
+
+	/* update CCPLEX_CSTATE? */
+	if (ccplex)
+		val |= (ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT |
+			CCPLEX_CSTATE_UPDATE_BIT;
+
+	/* update SYSTEM_CSTATE? */
+	if (system)
+		val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
+		       ((sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
+			SYSTEM_CSTATE_UPDATE_BIT);
+
+	/* update wake mask value? */
+	if (update_wake_mask)
+		val |= CSTATE_WAKE_MASK_UPDATE_BIT;
+
+	/* set the updated cstate info */
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_UPDATE_CSTATE_INFO, val,
+			wake_mask);
+}
+
+int ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
+{
+	/* sanity check crossover type */
+	if ((type == TEGRA_ARI_CROSSOVER_C1_C6) ||
+	    (type > TEGRA_ARI_CROSSOVER_CCP3_SC1))
+		return EINVAL;
+
+	/* update crossover threshold time */
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_UPDATE_CROSSOVER,
+			type, time);
+}
+
+uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state)
+{
+	int ret;
+
+	/* sanity check state */
+	if (state == 0)
+		return EINVAL;
+
+	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_CSTATE_STATS, state, 0);
+	if (ret != 0)
+		return EINVAL;
+
+	return (uint64_t)ari_get_response_low(ari_base);
+}
+
+int ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
+{
+	/* write the cstate stats */
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_WRITE_CSTATE_STATS, state,
+			stats);
+}
+
+uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data)
+{
+	uint64_t resp;
+	int ret;
+
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	/* ARI_REQUEST_DATA_HI is reserved for commands other than 'ECHO' */
+	if (cmd != TEGRA_ARI_MISC_ECHO)
+		data = 0;
+
+	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_MISC, cmd, data);
+	if (ret)
+		return (uint64_t)ret;
+
+	/* get the command response */
+	resp = ari_get_response_low(ari_base);
+	resp |= ((uint64_t)ari_get_response_high(ari_base) << 32);
+
+	return resp;
+}
+
+int ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	int ret;
+
+	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_IS_CCX_ALLOWED, state & 0x7,
+			wake_time);
+	if (ret) {
+		ERROR("%s: failed (%d)\n", __func__, ret);
+		return 0;
+	}
+
+	/* 1 = CCx allowed, 0 = CCx not allowed */
+	return (ari_get_response_low(ari_base) & 0x1);
+}
+
+int ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	int ret;
+
+	/* check for allowed power state */
+	if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
+	    state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
+		ERROR("%s: unknown cstate (%d)\n", __func__, state);
+		return EINVAL;
+	}
+
+	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_IS_SC7_ALLOWED, state,
+			wake_time);
+	if (ret) {
+		ERROR("%s: failed (%d)\n", __func__, ret);
+		return 0;
+	}
+
+	/* 1 = SC7 allowed, 0 = SC7 not allowed */
+	return !!ari_get_response_low(ari_base);
+}
+
+int ari_online_core(uint32_t ari_base, uint32_t core)
+{
+	int cpu = read_mpidr() & MPIDR_CPU_MASK;
+	int cluster = (read_mpidr() & MPIDR_CLUSTER_MASK) >>
+			MPIDR_AFFINITY_BITS;
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+	/* construct the current CPU # */
+	cpu |= (cluster << 2);
+
+	/* sanity check target core id */
+	if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) {
+		ERROR("%s: unsupported core id (%d)\n", __func__, core);
+		return EINVAL;
+	}
+
+	/*
+	 * The Denver cluster has 2 CPUs only - 0, 1.
+	 */
+	if (impl == DENVER_IMPL && ((core == 2) || (core == 3))) {
+		ERROR("%s: unknown core id (%d)\n", __func__, core);
+		return EINVAL;
+	}
+
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_ONLINE_CORE, core, 0);
+}
+
+int ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
+{
+	int val;
+
+	/*
+	 * If the enable bit is cleared, Auto-CC3 will be disabled by setting
+	 * the SW visible voltage/frequency request registers for all non
+	 * floorswept cores valid independent of StandbyWFI and disabling
+	 * the IDLE voltage/frequency request register. If set, Auto-CC3
+	 * will be enabled by setting the ARM SW visible voltage/frequency
+	 * request registers for all non floorswept cores to be enabled by
+	 * StandbyWFI or the equivalent signal, and always keeping the IDLE
+	 * voltage/frequency request register enabled.
+	 */
+	val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
+		((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
+		(enable ? MCE_AUTO_CC3_ENABLE_BIT : 0));
+
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_CC3_CTRL, val, 0);
+}
+
+int ari_reset_vector_update(uint32_t ari_base, uint32_t lo, uint32_t hi)
+{
+	/*
+	 * Need to program the CPU reset vector one time during cold boot
+	 * and SC7 exit
+	 */
+	ari_request_wait(ari_base, 0, TEGRA_ARI_COPY_MISCREG_AA64_RST, lo, hi);
+
+	return 0;
+}
+
+int ari_roc_flush_cache_trbits(uint32_t ari_base)
+{
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS,
+			0, 0);
+}
+
+int ari_roc_flush_cache(uint32_t ari_base)
+{
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_ROC_FLUSH_CACHE_ONLY,
+			0, 0);
+}
+
+int ari_roc_clean_cache(uint32_t ari_base)
+{
+	return ari_request_wait(ari_base, 0, TEGRA_ARI_ROC_CLEAN_CACHE_ONLY,
+			0, 0);
+}
+
+uint64_t ari_read_write_mca(uint32_t ari_base, mca_cmd_t cmd, uint64_t *data)
+{
+	mca_arg_t mca_arg;
+	int ret;
+
+	/* Set data (write) */
+	mca_arg.data = data ? *data : 0ull;
+
+	/* Set command */
+	ari_write_32(ari_base, cmd.input.low, ARI_RESPONSE_DATA_LO);
+	ari_write_32(ari_base, cmd.input.high, ARI_RESPONSE_DATA_HI);
+
+	ret = ari_request_wait(ari_base, 0, TEGRA_ARI_MCA, mca_arg.arg.low,
+			mca_arg.arg.high);
+	if (!ret) {
+		mca_arg.arg.low = ari_get_response_low(ari_base);
+		mca_arg.arg.high = ari_get_response_high(ari_base);
+		if (!mca_arg.err.finish)
+			return (uint64_t)mca_arg.err.error;
+
+		if (data) {
+			mca_arg.arg.low = ari_get_request_low(ari_base);
+			mca_arg.arg.high = ari_get_request_high(ari_base);
+			*data = mca_arg.data;
+		}
+	}
+
+	return 0;
+}
+
+int ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx)
+{
+	/* sanity check GSC ID */
+	if (gsc_idx > TEGRA_ARI_GSC_VPR_IDX)
+		return EINVAL;
+
+	/*
+	 * The MCE code will read the GSC carveout value, corresponding to
+	 * the ID, from the MC registers and update the internal GSC registers
+	 * of the CCPLEX.
+	 */
+	ari_request_wait(ari_base, 0, TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0);
+
+	return 0;
+}
+
+void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx)
+{
+	/*
+	 * The MCE will shut down or restart the entire system
+	 */
+	(void)ari_request_wait(ari_base, 0, TEGRA_ARI_MISC_CCPLEX, state_idx, 0);
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
new file mode 100644
index 0000000..9815107
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <mce.h>
+#include <mmio.h>
+#include <string.h>
+#include <sys/errno.h>
+#include <t18x_ari.h>
+#include <tegra_def.h>
+
+/* NVG functions handlers */
+static arch_mce_ops_t nvg_mce_ops = {
+	.enter_cstate = nvg_enter_cstate,
+	.update_cstate_info = nvg_update_cstate_info,
+	.update_crossover_time = nvg_update_crossover_time,
+	.read_cstate_stats = nvg_read_cstate_stats,
+	.write_cstate_stats = nvg_write_cstate_stats,
+	.call_enum_misc = ari_enumeration_misc,
+	.is_ccx_allowed = nvg_is_ccx_allowed,
+	.is_sc7_allowed = nvg_is_sc7_allowed,
+	.online_core = nvg_online_core,
+	.cc3_ctrl = nvg_cc3_ctrl,
+	.update_reset_vector = ari_reset_vector_update,
+	.roc_flush_cache = ari_roc_flush_cache,
+	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
+	.roc_clean_cache = ari_roc_clean_cache,
+	.read_write_mca = ari_read_write_mca,
+	.update_ccplex_gsc = ari_update_ccplex_gsc,
+	.enter_ccplex_state = ari_enter_ccplex_state
+};
+
+/* ARI functions handlers */
+static arch_mce_ops_t ari_mce_ops = {
+	.enter_cstate = ari_enter_cstate,
+	.update_cstate_info = ari_update_cstate_info,
+	.update_crossover_time = ari_update_crossover_time,
+	.read_cstate_stats = ari_read_cstate_stats,
+	.write_cstate_stats = ari_write_cstate_stats,
+	.call_enum_misc = ari_enumeration_misc,
+	.is_ccx_allowed = ari_is_ccx_allowed,
+	.is_sc7_allowed = ari_is_sc7_allowed,
+	.online_core = ari_online_core,
+	.cc3_ctrl = ari_cc3_ctrl,
+	.update_reset_vector = ari_reset_vector_update,
+	.roc_flush_cache = ari_roc_flush_cache,
+	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
+	.roc_clean_cache = ari_roc_clean_cache,
+	.read_write_mca = ari_read_write_mca,
+	.update_ccplex_gsc = ari_update_ccplex_gsc,
+	.enter_ccplex_state = ari_enter_ccplex_state
+};
+
+typedef struct mce_config {
+	uint32_t ari_base;
+	arch_mce_ops_t *ops;
+} mce_config_t;
+
+/* Table to hold the per-CPU ARI base address and function handlers */
+static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
+	{
+		/* A57 Core 0 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_0_OFFSET,
+		.ops = &ari_mce_ops,
+	},
+	{
+		/* A57 Core 1 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_1_OFFSET,
+		.ops = &ari_mce_ops,
+	},
+	{
+		/* A57 Core 2 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_2_OFFSET,
+		.ops = &ari_mce_ops,
+	},
+	{
+		/* A57 Core 3 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_3_OFFSET,
+		.ops = &ari_mce_ops,
+	},
+	{
+		/* D15 Core 0 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_4_OFFSET,
+		.ops = &nvg_mce_ops,
+	},
+	{
+		/* D15 Core 1 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_5_OFFSET,
+		.ops = &nvg_mce_ops,
+	}
+};
+
+static uint32_t mce_get_curr_cpu_ari_base(void)
+{
+	uint32_t mpidr = read_mpidr();
+	int cpuid =  mpidr & MPIDR_CPU_MASK;
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+	/*
+	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
+	 * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
+	 * numbers start from 0. In order to get the proper arch_mce_ops_t
+	 * struct, we have to convert the Denver CPU ids to the corresponding
+	 * indices in the mce_cfg_table array.
+	 */
+	if (impl == DENVER_IMPL)
+		cpuid |= 0x4;
+
+	return mce_cfg_table[cpuid].ari_base;
+}
+
+static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
+{
+	uint32_t mpidr = read_mpidr();
+	int cpuid =  mpidr & MPIDR_CPU_MASK;
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+	/*
+	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
+	 * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
+	 * numbers start from 0. In order to get the proper arch_mce_ops_t
+	 * struct, we have to convert the Denver CPU ids to the corresponding
+	 * indices in the mce_cfg_table array.
+	 */
+	if (impl == DENVER_IMPL)
+		cpuid |= 0x4;
+
+	return mce_cfg_table[cpuid].ops;
+}
+
+/*******************************************************************************
+ * Common handler for all MCE commands
+ ******************************************************************************/
+int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1,
+			uint64_t arg2)
+{
+	arch_mce_ops_t *ops;
+	uint32_t cpu_ari_base;
+	uint64_t ret64 = 0, arg3, arg4, arg5;
+	int ret = 0;
+	mca_cmd_t mca_cmd;
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	gp_regs_t *gp_regs = get_gpregs_ctx(ctx);
+
+	assert(ctx);
+	assert(gp_regs);
+
+	/* get a pointer to the CPU's arch_mce_ops_t struct */
+	ops = mce_get_curr_cpu_ops();
+
+	/* get the CPU's ARI base address */
+	cpu_ari_base = mce_get_curr_cpu_ari_base();
+
+	switch (cmd) {
+	case MCE_CMD_ENTER_CSTATE:
+		ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
+		if (ret < 0)
+			ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);
+
+		break;
+
+	case MCE_CMD_UPDATE_CSTATE_INFO:
+		/*
+		 * get the parameters required for the update cstate info
+		 * command
+		 */
+		arg3 = read_ctx_reg(gp_regs, CTX_GPREG_X4);
+		arg4 = read_ctx_reg(gp_regs, CTX_GPREG_X5);
+		arg5 = read_ctx_reg(gp_regs, CTX_GPREG_X6);
+
+		ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
+				(uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
+				(uint32_t)arg4, (uint8_t)arg5);
+		if (ret < 0)
+			ERROR("%s: update_cstate_info failed(%d)\n",
+				__func__, ret);
+
+		write_ctx_reg(gp_regs, CTX_GPREG_X4, 0);
+		write_ctx_reg(gp_regs, CTX_GPREG_X5, 0);
+		write_ctx_reg(gp_regs, CTX_GPREG_X6, 0);
+
+		break;
+
+	case MCE_CMD_UPDATE_CROSSOVER_TIME:
+		ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
+		if (ret < 0)
+			ERROR("%s: update_crossover_time failed(%d)\n",
+				__func__, ret);
+
+		break;
+
+	case MCE_CMD_READ_CSTATE_STATS:
+		ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);
+
+		/* update context to return cstate stats value */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
+		write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64);
+
+		break;
+
+	case MCE_CMD_WRITE_CSTATE_STATS:
+		ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
+		if (ret < 0)
+			ERROR("%s: write_cstate_stats failed(%d)\n",
+				__func__, ret);
+
+		break;
+
+	case MCE_CMD_IS_CCX_ALLOWED:
+		ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
+		if (ret < 0) {
+			ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret);
+			break;
+		}
+
+		/* update context to return CCx status value */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);
+
+		break;
+
+	case MCE_CMD_IS_SC7_ALLOWED:
+		ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
+		if (ret < 0) {
+			ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
+			break;
+		}
+
+		/* update context to return SC7 status value */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret);
+		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret);
+		ret = 0;
+
+		break;
+
+	case MCE_CMD_ONLINE_CORE:
+		ret = ops->online_core(cpu_ari_base, arg0);
+		if (ret < 0)
+			ERROR("%s: online_core failed(%d)\n", __func__, ret);
+
+		break;
+
+	case MCE_CMD_CC3_CTRL:
+		ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
+		if (ret < 0)
+			ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);
+
+		break;
+
+	case MCE_CMD_ECHO_DATA:
+		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
+				arg0);
+
+		/* update context to return if echo'd data matched source */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64 == arg0);
+		write_ctx_reg(gp_regs, CTX_GPREG_X2, ret64 == arg0);
+
+		break;
+
+	case MCE_CMD_READ_VERSIONS:
+		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
+			arg0);
+
+		/*
+		 * version = minor(63:32) | major(31:0). Update context
+		 * to return major and minor version number.
+		 */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint32_t)ret64);
+		write_ctx_reg(gp_regs, CTX_GPREG_X2, (uint32_t)(ret64 >> 32));
+
+		break;
+
+	case MCE_CMD_ENUM_FEATURES:
+		ret64 = ops->call_enum_misc(cpu_ari_base,
+				TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);
+
+		/* update context to return features value */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
+
+		ret = 0;
+
+		break;
+
+	case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
+		ret = ops->roc_flush_cache_trbits(cpu_ari_base);
+		if (ret < 0)
+			ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
+				ret);
+
+		break;
+
+	case MCE_CMD_ROC_FLUSH_CACHE:
+		ret = ops->roc_flush_cache(cpu_ari_base);
+		if (ret < 0)
+			ERROR("%s: flush cache failed(%d)\n", __func__, ret);
+
+		break;
+
+	case MCE_CMD_ROC_CLEAN_CACHE:
+		ret = ops->roc_clean_cache(cpu_ari_base);
+		if (ret < 0)
+			ERROR("%s: clean cache failed(%d)\n", __func__, ret);
+
+		break;
+
+	case MCE_CMD_ENUM_READ_MCA:
+		memcpy(&mca_cmd, &arg0, sizeof(arg0));
+		ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);
+
+		/* update context to return MCA data/error */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
+		write_ctx_reg(gp_regs, CTX_GPREG_X2, arg1);
+		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);
+
+		break;
+
+	case MCE_CMD_ENUM_WRITE_MCA:
+		memcpy(&mca_cmd, &arg0, sizeof(arg0));
+		ret64 = ops->read_write_mca(cpu_ari_base, mca_cmd, &arg1);
+
+		/* update context to return MCA error */
+		write_ctx_reg(gp_regs, CTX_GPREG_X1, ret64);
+		write_ctx_reg(gp_regs, CTX_GPREG_X3, ret64);
+
+		break;
+
+	default:
+		ERROR("unknown MCE command (%d)\n", cmd);
+		return EINVAL;
+	}
+
+	return ret;
+}
+
+/*******************************************************************************
+ * Handler to update the reset vector for CPUs
+ ******************************************************************************/
+int mce_update_reset_vector(uint32_t addr_lo, uint32_t addr_hi)
+{
+	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+	ops->update_reset_vector(mce_get_curr_cpu_ari_base(), addr_lo, addr_hi);
+
+	return 0;
+}
+
+static int mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
+{
+	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+	ops->update_ccplex_gsc(mce_get_curr_cpu_ari_base(), gsc_idx);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for Video Memory Carveout region
+ ******************************************************************************/
+int mce_update_gsc_videomem(void)
+{
+	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_VPR_IDX);
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for TZDRAM aperture
+ ******************************************************************************/
+int mce_update_gsc_tzdram(void)
+{
+	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZ_DRAM_IDX);
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for TZ SysRAM aperture
+ ******************************************************************************/
+int mce_update_gsc_tzram(void)
+{
+	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZRAM);
+}
+
+/*******************************************************************************
+ * Handler to shut down/reset the entire system
+ ******************************************************************************/
+__dead2 void mce_enter_ccplex_state(uint32_t state_idx)
+{
+	arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+	/* sanity check state value */
+	if (state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF &&
+	    state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)
+		panic();
+
+	ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), state_idx);
+
+	/* wait till the CCPLEX powers down */
+	for (;;)
+		;
+
+	panic();
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
new file mode 100644
index 0000000..25479a2
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <denver.h>
+#include <mmio.h>
+#include <mce.h>
+#include <sys/errno.h>
+#include <t18x_ari.h>
+
+extern void nvg_set_request_data(uint64_t req, uint64_t data);
+extern void nvg_set_request(uint64_t req);
+extern uint64_t nvg_get_result(void);
+
+int nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	/* check for allowed power state */
+	if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
+	    state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
+		ERROR("%s: unknown cstate (%d)\n", __func__, state);
+		return EINVAL;
+	}
+
+	/* time (TSC ticks) until the core is expected to get a wake event */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
+
+	/* set the core cstate */
+	write_actlr_el1(state);
+
+	return 0;
+}
+
+/*
+ * This request allows updating of CLUSTER_CSTATE, CCPLEX_CSTATE and
+ * SYSTEM_CSTATE values.
+ */
+int nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+		uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+		uint8_t update_wake_mask)
+{
+	uint64_t val = 0;
+
+	/* update CLUSTER_CSTATE? */
+	if (cluster)
+		val |= (cluster & CLUSTER_CSTATE_MASK) |
+			CLUSTER_CSTATE_UPDATE_BIT;
+
+	/* update CCPLEX_CSTATE? */
+	if (ccplex)
+		val |= (ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT |
+			CCPLEX_CSTATE_UPDATE_BIT;
+
+	/* update SYSTEM_CSTATE? */
+	if (system)
+		val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
+		       ((sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
+			SYSTEM_CSTATE_UPDATE_BIT);
+
+	/* update wake mask value? */
+	if (update_wake_mask)
+		val |= CSTATE_WAKE_MASK_UPDATE_BIT;
+
+	/* set the wake mask */
+	val &= CSTATE_WAKE_MASK_CLEAR;
+	val |= ((uint64_t)wake_mask << CSTATE_WAKE_MASK_SHIFT);
+
+	/* set the updated cstate info */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_INFO, val);
+
+	return 0;
+}
+
+int nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
+{
+	/* sanity check crossover type */
+	if (type > TEGRA_ARI_CROSSOVER_CCP3_SC1)
+		return EINVAL;
+
+	/*
+	 * The crossover threshold limit types start from
+	 * TEGRA_CROSSOVER_TYPE_C1_C6 to TEGRA_CROSSOVER_TYPE_CCP3_SC7. The
+	 * command indices for updating the threshold can be generated
+	 * by adding the type to the NVG_SET_THRESHOLD_CROSSOVER_C1_C6
+	 * command index.
+	 */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 + type,
+		(uint64_t)time);
+
+	return 0;
+}
+
+uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state)
+{
+	/* sanity check state */
+	if (state == 0)
+		return EINVAL;
+
+	/*
+	 * The cstate types start from NVG_READ_CSTATE_STATS_SC7_ENTRIES
+	 * to NVG_GET_LAST_CSTATE_ENTRY_A57_3. The command indices for
+	 * reading the stats can be generated by adding the type to
+	 * the NVG_CLEAR_CSTATE_STATS command index.
+	 */
+	nvg_set_request(TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR + state);
+
+	return nvg_get_result();
+}
+
+int nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
+{
+	uint64_t val;
+
+	/*
+	 * The only difference between a CSTATE_STATS_WRITE and
+	 * CSTATE_STATS_READ is the usage of the 63:32 in the request.
+	 * 63:32 are set to '0' for a read, while a write contains the
+	 * actual stats value to be written.
+	 */
+	val = ((uint64_t)stats << MCE_CSTATE_STATS_TYPE_SHIFT) | state;
+
+	/*
+	 * The cstate types start from NVG_READ_CSTATE_STATS_SC7_ENTRIES
+	 * to NVG_GET_LAST_CSTATE_ENTRY_A57_3. The command indices for
+	 * writing the stats can be generated by adding the type to
+	 * the NVG_CLEAR_CSTATE_STATS command index.
+	 */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR + state, val);
+
+	return 0;
+}
+
+int nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	/* This does not apply to the Denver cluster */
+	return 0;
+}
+
+int nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	uint64_t val;
+
+	/* check for allowed power state */
+	if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
+	    state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
+		ERROR("%s: unknown cstate (%d)\n", __func__, state);
+		return EINVAL;
+	}
+
+	/*
+	 * Request format -
+	 * 63:32 = wake time
+	 * 31:0 = C-state for this core
+	 */
+	val = ((uint64_t)wake_time << MCE_SC7_WAKE_TIME_SHIFT) |
+			(state & MCE_SC7_ALLOWED_MASK);
+
+	/* issue command to check if SC7 is allowed */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
+
+	/* 1 = SC7 allowed, 0 = SC7 not allowed */
+	return !!nvg_get_result();
+}
+
+int nvg_online_core(uint32_t ari_base, uint32_t core)
+{
+	int cpu = read_mpidr() & MPIDR_CPU_MASK;
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+	/* sanity check core id */
+	if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) {
+		ERROR("%s: unsupported core id (%d)\n", __func__, core);
+		return EINVAL;
+	}
+
+	/*
+	 * The Denver cluster has 2 CPUs only - 0, 1.
+	 */
+	if (impl == DENVER_IMPL && ((core == 2) || (core == 3))) {
+		ERROR("%s: unknown core id (%d)\n", __func__, core);
+		return EINVAL;
+	}
+
+	/* get a core online */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE, core & MCE_CORE_ID_MASK);
+
+	return 0;
+}
+
+int nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
+{
+	int val;
+
+	/*
+	 * If the enable bit is cleared, Auto-CC3 will be disabled by setting
+	 * the SW visible voltage/frequency request registers for all non
+	 * floorswept cores valid independent of StandbyWFI and disabling
+	 * the IDLE voltage/frequency request register. If set, Auto-CC3
+	 * will be enabled by setting the ARM SW visible voltage/frequency
+	 * request registers for all non floorswept cores to be enabled by
+	 * StandbyWFI or the equivalent signal, and always keeping the IDLE
+	 * voltage/frequency request register enabled.
+	 */
+	val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
+		((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
+		(enable ? MCE_AUTO_CC3_ENABLE_BIT : 0));
+
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, val);
+
+	return 0;
+}