blob: 83d815afc742b162260a80d0ee2a726dd16754f3 [file] [log] [blame]
/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
7#include <arch.h>
Varun Wadekarecd6a5a2018-04-09 17:48:58 -07008#include <assert.h>
Steven Kao530b2172017-06-23 16:18:58 +08009#include <stdbool.h>
10#include <string.h>
11
12#include <arch_helpers.h>
Jeetesh Burmandbcc95c2018-07-06 20:03:38 +053013#include <bpmp_ipc.h>
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070014#include <common/bl_common.h>
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070015#include <common/debug.h>
Steven Kao530b2172017-06-23 16:18:58 +080016#include <context.h>
Jeetesh Burmandbcc95c2018-07-06 20:03:38 +053017#include <drivers/delay_timer.h>
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070018#include <denver.h>
Steven Kao530b2172017-06-23 16:18:58 +080019#include <lib/el3_runtime/context_mgmt.h>
20#include <lib/psci/psci.h>
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070021#include <mce.h>
Dilan Lee4e7a63c2017-08-10 16:01:42 +080022#include <mce_private.h>
Pritesh Raithatha75c94432018-08-03 15:48:15 +053023#include <memctrl_v2.h>
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070024#include <plat/common/platform.h>
Steven Kao530b2172017-06-23 16:18:58 +080025#include <se.h>
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070026#include <smmu.h>
Tejal Kudav153ba222017-02-14 18:02:04 -080027#include <t194_nvg.h>
Varun Wadekare0c222f2017-11-10 13:23:34 -080028#include <tegra194_private.h>
Steven Kao530b2172017-06-23 16:18:58 +080029#include <tegra_platform.h>
30#include <tegra_private.h>
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070031
Varun Wadekar362a6b22017-11-10 11:04:42 -080032extern uint32_t __tegra194_cpu_reset_handler_data,
33 __tegra194_cpu_reset_handler_end;
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070034
35/* TZDRAM offset for saving SMMU context */
Varun Wadekar362a6b22017-11-10 11:04:42 -080036#define TEGRA194_SMMU_CTX_OFFSET 16U
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070037
38/* state id mask */
Varun Wadekar362a6b22017-11-10 11:04:42 -080039#define TEGRA194_STATE_ID_MASK 0xFU
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070040/* constants to get power state's wake time */
Varun Wadekar362a6b22017-11-10 11:04:42 -080041#define TEGRA194_WAKE_TIME_MASK 0x0FFFFFF0U
42#define TEGRA194_WAKE_TIME_SHIFT 4U
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070043/* default core wake mask for CPU_SUSPEND */
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +080044#define TEGRA194_CORE_WAKE_MASK 0x180cU
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070045
/*
 * Per-CPU PSCI bookkeeping: the core wake time (in TSC ticks) saved by
 * tegra_soc_validate_power_state() and consumed when the core idles.
 * Each entry is cache-line aligned so a single entry can be cleaned to
 * DRAM without touching its neighbours (read back with caches disabled
 * during suspend).
 */
static struct t19x_psci_percpu_data {
	uint32_t wake_time;	/* wake time in TSC ticks */
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070049
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +080050int32_t tegra_soc_validate_power_state(uint32_t power_state,
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070051 psci_power_state_t *req_state)
52{
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +080053 uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
Varun Wadekar362a6b22017-11-10 11:04:42 -080054 TEGRA194_STATE_ID_MASK;
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +080055 uint32_t cpu = plat_my_core_pos();
56 int32_t ret = PSCI_E_SUCCESS;
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070057
58 /* save the core wake time (in TSC ticks)*/
Varun Wadekar362a6b22017-11-10 11:04:42 -080059 t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
60 << TEGRA194_WAKE_TIME_SHIFT;
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070061
62 /*
Varun Wadekar56c64592019-12-03 08:50:57 -080063 * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure
64 * that the correct value is read in tegra_soc_pwr_domain_suspend(),
65 * which is called with caches disabled. It is possible to read a stale
66 * value from DRAM in that function, because the L2 cache is not flushed
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070067 * unless the cluster is entering CC6/CC7.
68 */
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +080069 clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
70 sizeof(t19x_percpu_data[cpu]));
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070071
72 /* Sanity check the requested state id */
73 switch (state_id) {
74 case PSTATE_ID_CORE_IDLE:
Varun Wadekarc61094b2017-12-27 18:01:59 -080075
Varun Wadekarcb627622020-05-05 22:44:20 -070076 if (psci_get_pstate_type(power_state) != PSTATE_TYPE_STANDBY) {
77 ret = PSCI_E_INVALID_PARAMS;
78 break;
79 }
80
Varun Wadekarc61094b2017-12-27 18:01:59 -080081 /* Core idle request */
82 req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
83 req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN;
84 break;
85
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070086 default:
87 ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +080088 ret = PSCI_E_INVALID_PARAMS;
89 break;
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070090 }
91
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +080092 return ret;
Varun Wadekarecd6a5a2018-04-09 17:48:58 -070093}
94
Varun Wadekarc61094b2017-12-27 18:01:59 -080095int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
96{
97 uint32_t cpu = plat_my_core_pos();
98 mce_cstate_info_t cstate_info = { 0 };
99
100 /* Program default wake mask */
101 cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
102 cstate_info.update_wake_mask = 1;
103 mce_update_cstate_info(&cstate_info);
104
105 /* Enter CPU idle */
106 (void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
107 (uint64_t)TEGRA_NVG_CORE_C6,
108 t19x_percpu_data[cpu].wake_time,
109 0U);
110
111 return PSCI_E_SUCCESS;
112}
113
/*
 * Power-domain suspend handler. For a system-level powerdown request
 * (SC7), this saves the secure-boot config and MC context to TZDRAM,
 * suspends the SE engines, then polls the MCE until SC7 entry is allowed
 * and instructs it to enter system suspend.
 *
 * Runs with caches disabled; all saved state must reach DRAM.
 * Returns PSCI_E_SUCCESS (failures are caught via assert on debug builds).
 */
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl2;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t mc_ctx_base;
	uint32_t val;
	/* SC7 entry parameters: cluster CC6, ccplex CG7, system SC7 (forced) */
	mce_cstate_info_t sc7_cstate_info = {
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
		.ccplex = (uint32_t)TEGRA_NVG_CG_CG7,
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
	};
	int32_t ret = 0;

	/* get the state ID at the highest (system) power level */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save MC context to its TZDRAM slot */
		mc_ctx_base = params_from_bl2->tzdram_base +
				tegra194_get_mc_ctx_offset();
		tegra_mc_save_context((uintptr_t)mc_ctx_base);

		/*
		 * Suspend SE, RNG1 and PKA1 only on silcon and fpga,
		 * since VDK does not support atomic se ctx save
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

		/* Prepare for system suspend: program SC7 cstate info */
		mce_update_cstate_info(&sc7_cstate_info);

		/* Busy-wait until the MCE reports SC7 entry is allowed */
		do {
			val = (uint32_t)mce_command_handler(
					(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint32_t)TEGRA_NVG_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		ret = mce_command_handler(
				(uint64_t)MCE_CMD_ENTER_CSTATE,
				(uint64_t)TEGRA_NVG_CORE_C7,
				MCE_CORE_SLEEP_TIME_INFINITE,
				0U);
		assert(ret == 0);

		/* set system suspend state for house-keeping */
		tegra194_set_system_suspend_entry();
	}

	return PSCI_E_SUCCESS;
}
180
181/*******************************************************************************
Varun Wadekar0723bb62017-10-16 15:57:17 -0700182 * Helper function to check if this is the last ON CPU in the cluster
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700183 ******************************************************************************/
Varun Wadekar0723bb62017-10-16 15:57:17 -0700184static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
185 uint32_t ncpu)
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700186{
Varun Wadekar0723bb62017-10-16 15:57:17 -0700187 plat_local_state_t target;
188 bool last_on_cpu = true;
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800189 uint32_t num_cpus = ncpu, pos = 0;
Varun Wadekar0723bb62017-10-16 15:57:17 -0700190
191 do {
192 target = states[pos];
193 if (target != PLAT_MAX_OFF_STATE) {
194 last_on_cpu = false;
195 }
196 --num_cpus;
197 pos++;
198 } while (num_cpus != 0U);
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700199
Varun Wadekar0723bb62017-10-16 15:57:17 -0700200 return last_on_cpu;
201}
202
203/*******************************************************************************
204 * Helper function to get target power state for the cluster
205 ******************************************************************************/
206static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
207 uint32_t ncpu)
208{
209 uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
210 plat_local_state_t target = states[core_pos];
211 mce_cstate_info_t cstate_info = { 0 };
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700212
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700213 /* CPU off */
Varun Wadekar0723bb62017-10-16 15:57:17 -0700214 if (target == PLAT_MAX_OFF_STATE) {
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700215
216 /* Enable cluster powerdn from last CPU in the cluster */
Varun Wadekar0723bb62017-10-16 15:57:17 -0700217 if (tegra_last_on_cpu_in_cluster(states, ncpu)) {
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700218
Varun Wadekar0723bb62017-10-16 15:57:17 -0700219 /* Enable CC6 state and turn off wake mask */
220 cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
Vignesh Radhakrishnan90d80192017-12-27 21:04:49 -0800221 cstate_info.ccplex = (uint32_t)TEGRA_NVG_CG_CG7;
222 cstate_info.system_state_force = 1;
Krishna Sitaraman74813f92017-07-14 13:51:44 -0700223 cstate_info.update_wake_mask = 1U;
224 mce_update_cstate_info(&cstate_info);
225
226 } else {
Varun Wadekar0723bb62017-10-16 15:57:17 -0700227
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700228 /* Turn off wake_mask */
Krishna Sitaraman74813f92017-07-14 13:51:44 -0700229 cstate_info.update_wake_mask = 1U;
230 mce_update_cstate_info(&cstate_info);
Varun Wadekar0723bb62017-10-16 15:57:17 -0700231 target = PSCI_LOCAL_STATE_RUN;
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700232 }
233 }
234
Varun Wadekar0723bb62017-10-16 15:57:17 -0700235 return target;
236}
237
238/*******************************************************************************
239 * Platform handler to calculate the proper target power level at the
240 * specified affinity level
241 ******************************************************************************/
242plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
243 const plat_local_state_t *states,
244 uint32_t ncpu)
245{
246 plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
247 uint32_t cpu = plat_my_core_pos();
248
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700249 /* System Suspend */
Varun Wadekar0723bb62017-10-16 15:57:17 -0700250 if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
251 target = PSTATE_ID_SOC_POWERDN;
252 }
253
254 /* CPU off, CPU suspend */
255 if (lvl == (uint32_t)MPIDR_AFFLVL1) {
256 target = tegra_get_afflvl1_pwr_state(states, ncpu);
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800257 }
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700258
Varun Wadekar0723bb62017-10-16 15:57:17 -0700259 /* target cluster/system state */
260 return target;
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700261}
262
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800263int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700264{
265 const plat_local_state_t *pwr_domain_state =
266 target_state->pwr_domain_state;
267 plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800268 uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
Varun Wadekar362a6b22017-11-10 11:04:42 -0800269 TEGRA194_STATE_ID_MASK;
Jeetesh Burmandbcc95c2018-07-06 20:03:38 +0530270 uint64_t src_len_in_bytes = (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE;
Steven Kao55c2ce72016-12-23 15:51:32 +0800271 uint64_t val;
Jeetesh Burmandbcc95c2018-07-06 20:03:38 +0530272 int32_t ret = PSCI_E_SUCCESS;
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700273
274 if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
Jeetesh Burmandbcc95c2018-07-06 20:03:38 +0530275 val = params_from_bl2->tzdram_base +
276 tegra194_get_cpu_reset_handler_size();
277
278 /* initialise communication channel with BPMP */
279 ret = tegra_bpmp_ipc_init();
280 assert(ret == 0);
281
282 /* Enable SE clock before SE context save */
Varun Wadekare55c27b2018-09-13 08:47:43 -0700283 ret = tegra_bpmp_ipc_enable_clock(TEGRA194_CLK_SE);
Jeetesh Burmandbcc95c2018-07-06 20:03:38 +0530284 assert(ret == 0);
285
286 /*
287 * It is very unlikely that the BL31 image would be
288 * bigger than 2^32 bytes
289 */
290 assert(src_len_in_bytes < UINT32_MAX);
291
292 if (tegra_se_calculate_save_sha256(BL31_BASE,
293 (uint32_t)src_len_in_bytes) != 0) {
294 ERROR("Hash calculation failed. Reboot\n");
295 (void)tegra_soc_prepare_system_reset();
296 }
297
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700298 /*
299 * The TZRAM loses power when we enter system suspend. To
300 * allow graceful exit from system suspend, we need to copy
301 * BL3-1 over to TZDRAM.
302 */
303 val = params_from_bl2->tzdram_base +
Varun Wadekare0c222f2017-11-10 13:23:34 -0800304 tegra194_get_cpu_reset_handler_size();
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700305 memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
Jeetesh Burmandbcc95c2018-07-06 20:03:38 +0530306 src_len_in_bytes);
307
308 /* Disable SE clock after SE context save */
Varun Wadekare55c27b2018-09-13 08:47:43 -0700309 ret = tegra_bpmp_ipc_disable_clock(TEGRA194_CLK_SE);
Jeetesh Burmandbcc95c2018-07-06 20:03:38 +0530310 assert(ret == 0);
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700311 }
312
Jeetesh Burmandbcc95c2018-07-06 20:03:38 +0530313 return ret;
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700314}
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700315
Varun Wadekarb5b15b22018-05-17 10:10:25 -0700316int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
317{
318 return PSCI_E_NOT_SUPPORTED;
319}
320
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800321int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700322{
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800323 uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
324 uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700325 MPIDR_AFFINITY_BITS;
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800326 int32_t ret = 0;
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700327
Varun Wadekara4e0a812017-10-17 10:53:33 -0700328 if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700329 ERROR("%s: unsupported CPU (0x%lx)\n", __func__ , mpidr);
330 return PSCI_E_NOT_PRESENT;
331 }
332
333 /* construct the target CPU # */
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800334 target_cpu += (target_cluster << 1U);
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700335
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800336 ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
337 if (ret < 0) {
338 return PSCI_E_DENIED;
339 }
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700340
341 return PSCI_E_SUCCESS;
342}
343
/*
 * Late power-on hook, run on the newly-onlined CPU. Resets the saved
 * ACTLR_EL1 power state to C1 for the incoming non-secure context, and
 * on exit from system suspend (SC7) restores SMMU/SE state and
 * reprograms the XUSB stream IDs. Optionally enables CCPLEX lock-step
 * (dual execution) at all ELs when requested by BL2.
 */
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t enable_ccplex_lock_step = params_from_bl2->enable_ccplex_lock_step;
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	uint64_t actlr_elx;

	/*
	 * Reset power state info for CPUs when onlining, we set
	 * deepest power when offlining a core but that may not be
	 * requested by non-secure sw which controls idle states. It
	 * will re-init this info from non-secure software when the
	 * core come online.
	 */
	actlr_elx = read_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1));
	actlr_elx &= ~DENVER_CPU_PMSTATE_MASK;
	actlr_elx |= DENVER_CPU_PMSTATE_C1;
	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

#if ENABLE_STRICT_CHECKING_MODE
		/*
		 * Enable strict checking after programming the GSC for
		 * enabling TZSRAM and TZDRAM
		 */
		mce_enable_strict_checking();
#endif

		/* Init SMMU */
		tegra_smmu_init();

		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

		/*
		 * Program XUSB STREAMIDs
		 * ======================
		 * T19x XUSB has support for XUSB virtualization. It will
		 * have one physical function (PF) and four Virtual functions
		 * (VF)
		 *
		 * There were below two SIDs for XUSB until T186.
		 * 1) #define TEGRA_SID_XUSB_HOST 0x1bU
		 * 2) #define TEGRA_SID_XUSB_DEV 0x1cU
		 *
		 * We have below four new SIDs added for VF(s)
		 * 3) #define TEGRA_SID_XUSB_VF0 0x5dU
		 * 4) #define TEGRA_SID_XUSB_VF1 0x5eU
		 * 5) #define TEGRA_SID_XUSB_VF2 0x5fU
		 * 6) #define TEGRA_SID_XUSB_VF3 0x60U
		 *
		 * When virtualization is enabled then we have to disable SID
		 * override and program above SIDs in below newly added SID
		 * registers in XUSB PADCTL MMIO space. These registers are
		 * TZ protected and so need to be done in ATF.
		 *
		 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
		 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0  (0x139cU)
		 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
		 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
		 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
		 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
		 *
		 * This change disables SID override and programs XUSB SIDs
		 * in above registers to support both virtualization and
		 * non-virtualization platforms
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {

			/* each write is read back to confirm it took effect */
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
			assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_PF_0) == TEGRA_SID_XUSB_HOST);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
			assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_0) == TEGRA_SID_XUSB_VF0);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
			assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_1) == TEGRA_SID_XUSB_VF1);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
			assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_2) == TEGRA_SID_XUSB_VF2);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
			assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_3) == TEGRA_SID_XUSB_VF3);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);
			assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_DEV_AXI_STREAMID_PF_0) == TEGRA_SID_XUSB_DEV);
		}
	}

	/*
	 * Enable dual execution optimized translations for all ELx.
	 */
	if (enable_ccplex_lock_step != 0U) {
		actlr_elx = read_actlr_el3();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL3;
		write_actlr_el3(actlr_elx);

		actlr_elx = read_actlr_el2();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL2;
		write_actlr_el2(actlr_elx);

		actlr_elx = read_actlr_el1();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL1;
		write_actlr_el1(actlr_elx);
	}

	return PSCI_E_SUCCESS;
}
465
Varun Wadekarcb2dd3a2023-04-25 14:58:33 +0100466int32_t tegra_soc_pwr_domain_off_early(const psci_power_state_t *target_state)
467{
468 /* Do not power off the boot CPU */
469 if (plat_is_my_cpu_primary()) {
470 return PSCI_E_DENIED;
471 }
472
473 return PSCI_E_SUCCESS;
474}
475
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800476int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700477{
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800478 uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
Krishna Sitaraman74813f92017-07-14 13:51:44 -0700479 int32_t ret = 0;
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700480
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800481 (void)target_state;
482
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700483 /* Disable Denver's DCO operations */
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800484 if (impl == DENVER_IMPL) {
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700485 denver_disable_dco();
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800486 }
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700487
488 /* Turn off CPU */
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800489 ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
490 (uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
Krishna Sitaraman74813f92017-07-14 13:51:44 -0700491 assert(ret == 0);
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700492
493 return PSCI_E_SUCCESS;
494}
495
496__dead2 void tegra_soc_prepare_system_off(void)
497{
498 /* System power off */
Vignesh Radhakrishnan2aaa41c2017-06-14 09:59:27 -0700499 mce_system_shutdown();
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700500
501 wfi();
502
503 /* wait for the system to power down */
504 for (;;) {
505 ;
506 }
507}
508
Anthony Zhou8bf6d4e2017-09-20 17:44:43 +0800509int32_t tegra_soc_prepare_system_reset(void)
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700510{
Vignesh Radhakrishnan2aaa41c2017-06-14 09:59:27 -0700511 /* System reboot */
512 mce_system_reboot();
513
Varun Wadekarecd6a5a2018-04-09 17:48:58 -0700514 return PSCI_E_SUCCESS;
515}