/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <bpmp_ipc.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/delay_timer.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <mce.h>
#include <mce_private.h>
#include <memctrl_v2.h>
#include <plat/common/platform.h>
#include <se.h>
#include <smmu.h>
#include <t194_nvg.h>
#include <tegra194_private.h>
#include <tegra_platform.h>
#include <tegra_private.h>

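/*
 * Markers around the CPU reset handler data; assumed to be exported by the
 * Tegra194 CPU reset handler/trampoline code.
 */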
extern uint32_t __tegra194_cpu_reset_handler_data,
		__tegra194_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA194_SMMU_CTX_OFFSET	16U

/* state id mask */
#define TEGRA194_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180cU

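/*
 * Per-CPU data saved by tegra_soc_validate_power_state(). Each entry is
 * aligned to the cache writeback granule so that cleaning one CPU's entry
 * to memory does not touch its neighbours.
 */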
static struct t19x_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];

int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
			   TEGRA194_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
			<< TEGRA194_WAKE_TIME_SHIFT;

	/*
	 * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure
	 * that the correct value is read in tegra_soc_pwr_domain_suspend(),
	 * which is called with caches disabled. It is possible to read a stale
	 * value from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
			sizeof(t19x_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:

		/* Core idle request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN;
		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
	uint32_t cpu = plat_my_core_pos();
	mce_cstate_info_t cstate_info = { 0 };

	/* Program default wake mask */
	cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
	cstate_info.update_wake_mask = 1;
	mce_update_cstate_info(&cstate_info);

	/* Enter CPU idle */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
				  (uint64_t)TEGRA_NVG_CORE_C6,
				  t19x_percpu_data[cpu].wake_time,
				  0U);

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl2;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t mc_ctx_base;
	uint32_t val;
	mce_cstate_info_t sc7_cstate_info = {
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
		.ccplex = (uint32_t)TEGRA_NVG_CG_CG7,
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
	};
	int32_t ret = 0;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save MC context */
		mc_ctx_base = params_from_bl2->tzdram_base +
				tegra194_get_mc_ctx_offset();
		tegra_mc_save_context((uintptr_t)mc_ctx_base);

		/*
		 * Suspend SE, RNG1 and PKA1 only on silicon and FPGA,
		 * since the VDK does not support atomic SE context save
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

		/* Prepare for system suspend */
		mce_update_cstate_info(&sc7_cstate_info);

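		/* Loop until the MCE firmware reports that SC7 entry is allowed */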
		do {
			val = (uint32_t)mce_command_handler(
					(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint32_t)TEGRA_NVG_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		ret = mce_command_handler(
				(uint64_t)MCE_CMD_ENTER_CSTATE,
				(uint64_t)TEGRA_NVG_CORE_C7,
				MCE_CORE_SLEEP_TIME_INFINITE,
				0U);
		assert(ret == 0);

		/* set system suspend state for house-keeping */
		tegra194_set_system_suspend_entry();
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
					 uint32_t ncpu)
{
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
						       uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {

		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_on_cpu_in_cluster(states, ncpu)) {

			/* Enable CC6 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
			cstate_info.ccplex = (uint32_t)TEGRA_NVG_CG_CG7;
			cstate_info.system_state_force = 1;
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
						  const plat_local_state_t *states,
						  uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

	/* System Suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
	}

	/* target cluster/system state */
	return target;
}

int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;
	uint64_t src_len_in_bytes = (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE;
	uint64_t val;
	int32_t ret = PSCI_E_SUCCESS;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
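		/* TZDRAM address of the BL31 copy, placed after the CPU reset handler */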
		val = params_from_bl2->tzdram_base +
		      tegra194_get_cpu_reset_handler_size();

		/* initialise communication channel with BPMP */
		ret = tegra_bpmp_ipc_init();
		assert(ret == 0);

		/* Enable SE clock before SE context save */
		ret = tegra_bpmp_ipc_enable_clock(TEGRA194_CLK_SE);
		assert(ret == 0);

		/*
		 * It is very unlikely that the BL31 image would be
		 * bigger than 2^32 bytes
		 */
		assert(src_len_in_bytes < UINT32_MAX);

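		/*
		 * Save the SHA256 measurement of the BL31 image via the SE,
		 * so that the copy placed in TZDRAM below can be verified
		 * later (presumably when exiting system suspend).
		 */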
		if (tegra_se_calculate_save_sha256(BL31_BASE,
					(uint32_t)src_len_in_bytes) != 0) {
			ERROR("Hash calculation failed. Reboot\n");
			(void)tegra_soc_prepare_system_reset();
		}

		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
		      tegra194_get_cpu_reset_handler_size();
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       src_len_in_bytes);

		/* Disable SE clock after SE context save */
		ret = tegra_bpmp_ipc_disable_clock(TEGRA194_CLK_SE);
		assert(ret == 0);
	}

	return ret;
}

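/* The optional early powerdown hook is not implemented for this platform */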
int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
{
	return PSCI_E_NOT_SUPPORTED;
}

int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;
	int32_t ret = 0;

	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/* construct the target CPU # */
	target_cpu += (target_cluster << 1U);

	ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	if (ret < 0) {
		return PSCI_E_DENIED;
	}

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t enable_ccplex_lock_step = params_from_bl2->enable_ccplex_lock_step;
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	uint64_t actlr_elx;

	/*
	 * Reset the power state info for CPUs when onlining. We set the
	 * deepest power state when offlining a core, but that may not be
	 * what the non-secure software controlling idle states requested.
	 * Non-secure software re-initialises this info when the core comes
	 * back online.
	 */
	actlr_elx = read_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1));
	actlr_elx &= ~DENVER_CPU_PMSTATE_MASK;
	actlr_elx |= DENVER_CPU_PMSTATE_C1;
	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

#if ENABLE_STRICT_CHECKING_MODE
		/*
		 * Enable strict checking after programming the GSC for
		 * enabling TZSRAM and TZDRAM
		 */
		mce_enable_strict_checking();
#endif

		/* Init SMMU */
		tegra_smmu_init();

		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

		/*
		 * Program XUSB STREAMIDs
		 * ======================
		 * T19x XUSB supports virtualization: it exposes one physical
		 * function (PF) and four virtual functions (VFs).
		 *
		 * Until T186 there were only two SIDs for XUSB:
		 * 1) #define TEGRA_SID_XUSB_HOST	0x1bU
		 * 2) #define TEGRA_SID_XUSB_DEV	0x1cU
		 *
		 * Four new SIDs have been added for the VFs:
		 * 3) #define TEGRA_SID_XUSB_VF0	0x5dU
		 * 4) #define TEGRA_SID_XUSB_VF1	0x5eU
		 * 5) #define TEGRA_SID_XUSB_VF2	0x5fU
		 * 6) #define TEGRA_SID_XUSB_VF3	0x60U
		 *
		 * When virtualization is enabled, the SID override must be
		 * disabled and the SIDs above must be programmed into the
		 * following SID registers in the XUSB PADCTL MMIO space.
		 * These registers are TZ protected, so this has to be done
		 * in ATF.
		 *
		 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0	(0x136cU)
		 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0	(0x139cU)
		 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0	(0x1370U)
		 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1	(0x1374U)
		 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2	(0x1378U)
		 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3	(0x137cU)
		 *
		 * This code disables the SID override and programs the XUSB
		 * SIDs in the above registers to support both virtualization
		 * and non-virtualization platforms.
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {

			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);
		}
	}

	/*
	 * Enable dual execution optimized translations for all ELx.
	 */
	if (enable_ccplex_lock_step != 0U) {
		actlr_elx = read_actlr_el3();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL3;
		write_actlr_el3(actlr_elx);

		actlr_elx = read_actlr_el2();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL2;
		write_actlr_el2(actlr_elx);

		actlr_elx = read_actlr_el1();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL1;
		write_actlr_el1(actlr_elx);
	}

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
	int32_t ret = 0;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	assert(ret == 0);

	return PSCI_E_SUCCESS;
}

__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */
	mce_system_shutdown();

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

int32_t tegra_soc_prepare_system_reset(void)
{
	/* System reboot */
	mce_system_reboot();

	return PSCI_E_SUCCESS;
}