/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <mce.h>
#include <mce_private.h>
#include <plat/common/platform.h>
#include <se.h>
#include <smmu.h>
#include <t194_nvg.h>
#include <tegra194_private.h>
#include <tegra_platform.h>
#include <tegra_private.h>

extern uint32_t __tegra194_cpu_reset_handler_data,
		__tegra194_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA194_SMMU_CTX_OFFSET	16U

/* state id mask */
#define TEGRA194_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180cU

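/*
 * Per-CPU wake time storage; aligned to the cache writeback granule so
 * each entry can be cleaned to DRAM independently of its neighbours.
 */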
static struct t19x_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];

/*
 * tegra_fake_system_suspend acts as a boolean flag that selects between
 * the fake system suspend path and the normal system suspend path. It is
 * set by the SiP call handlers when the kernel issues an SiP call to set
 * the suspend debug flags.
 */
bool tegra_fake_system_suspend;

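/*
 * Validate the power state requested via CPU_SUSPEND and populate the
 * per-power-level states expected by the PSCI framework.
 */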
int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
			   TEGRA194_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
			<< TEGRA194_WAKE_TIME_SHIFT;

	/*
	 * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure
	 * that the correct value is read in tegra_soc_pwr_domain_suspend(),
	 * which is called with caches disabled. It is possible to read a stale
	 * value from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
			sizeof(t19x_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:

		/* Core idle request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN;
		break;

	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

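/*
 * CPU standby handler - program the default wake mask and request core
 * state C6 from the MCE.
 */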
int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
	uint32_t cpu = plat_my_core_pos();
	mce_cstate_info_t cstate_info = { 0 };

	/* Program default wake mask */
	cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
	cstate_info.update_wake_mask = 1;
	mce_update_cstate_info(&cstate_info);

	/* Enter CPU idle */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
				  (uint64_t)TEGRA_NVG_CORE_C6,
				  t19x_percpu_data[cpu].wake_time,
				  0U);

	return PSCI_E_SUCCESS;
}

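/*
 * CPU suspend handler - request core power-down (C7) or prepare the SoC
 * for system suspend (SC7), depending on the requested target state.
 */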
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t smmu_ctx_base;
	uint32_t val;
	mce_cstate_info_t sc7_cstate_info = {
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
		.ccplex = (uint32_t)TEGRA_NVG_CG_CG7,
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
	};
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = 0;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
			  TEGRA194_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
			  TEGRA194_STATE_ID_MASK;

	if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {

		/* Enter CPU powerdown */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
					  (uint64_t)TEGRA_NVG_CORE_C7,
					  t19x_percpu_data[cpu].wake_time,
					  0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save SMMU context */
		smmu_ctx_base = params_from_bl2->tzdram_base +
				tegra194_get_smmu_ctx_offset();
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/*
		 * Suspend SE, RNG1 and PKA1 only on silicon and FPGA,
		 * since the VDK does not support atomic SE context save
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

		if (!tegra_fake_system_suspend) {

			/* Prepare for system suspend */
			mce_update_cstate_info(&sc7_cstate_info);

			do {
				val = (uint32_t)mce_command_handler(
						(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
						(uint32_t)TEGRA_NVG_CORE_C7,
						MCE_CORE_SLEEP_TIME_INFINITE,
						0U);
			} while (val == 0U);

			/* Instruct the MCE to enter system suspend state */
			ret = mce_command_handler(
					(uint64_t)MCE_CMD_ENTER_CSTATE,
					(uint64_t)TEGRA_NVG_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
			assert(ret == 0);

			/* set system suspend state for house-keeping */
			tegra194_set_system_suspend_entry();
		}
	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
					 uint32_t ncpu)
{
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
						      uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU suspend */
	if (target == PSTATE_ID_CORE_POWERDN) {

		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {

		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_on_cpu_in_cluster(states, ncpu)) {

			/* Enable CC6 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
			cstate_info.ccplex = (uint32_t)TEGRA_NVG_CG_CG7;
			cstate_info.system_state_force = 1;
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
						  const plat_local_state_t *states,
						  uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

	/* System Suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
	}

	/* target cluster/system state */
	return target;
}

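/*
 * Power-down WFI handler - copy BL31 to TZDRAM before entering system
 * suspend and handle the fake system suspend exit path.
 */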
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;
	uint64_t val;
	u_register_t ns_sctlr_el1;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
		      tegra194_get_cpu_reset_handler_size();
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);

		/*
		 * In fake suspend mode, ensure that the loopback procedure
		 * towards system suspend exit is started, instead of calling
		 * WFI. This is done by disabling the EL1 and EL3 MMUs and
		 * calling tegra_secure_entrypoint().
		 */
		if (tegra_fake_system_suspend) {

			/*
			 * Disable EL1's MMU.
			 */
			ns_sctlr_el1 = read_sctlr_el1();
			ns_sctlr_el1 &= (~((u_register_t)SCTLR_M_BIT));
			write_sctlr_el1(ns_sctlr_el1);

			/*
			 * Disable MMU to power up the CPU in a "clean"
			 * state
			 */
			disable_mmu_el3();
			tegra_secure_entrypoint();
			panic();
		}
	}

	return PSCI_E_SUCCESS;
}

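/*
 * CPU_ON handler - ask the MCE to bring the target core online.
 */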
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;
	int32_t ret = 0;

	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/* construct the target CPU # */
	target_cpu += (target_cluster << 1U);

	ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	if (ret < 0) {
		return PSCI_E_DENIED;
	}

	return PSCI_E_SUCCESS;
}

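/*
 * CPU_ON finish handler - restore SMMU, SE and XUSB stream ID state when
 * the core is coming out of system suspend (SC7).
 */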
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];

	/*
	 * Reset the power state info for CPUs when onlining. We set the
	 * deepest power state when offlining a core, but that may not be
	 * what the non-secure software controlling idle states requested.
	 * That software re-initialises this info when the core comes back
	 * online.
	 */

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

#if ENABLE_STRICT_CHECKING_MODE
		/*
		 * Enable strict checking after programming the GSC for
		 * enabling TZSRAM and TZDRAM
		 */
		mce_enable_strict_checking();
#endif

		/* Init SMMU */
		tegra_smmu_init();

		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

		/*
		 * Program XUSB STREAMIDs
		 * ======================
		 * T19x XUSB supports XUSB virtualization, with one physical
		 * function (PF) and four virtual functions (VF).
		 *
		 * Until T186 there were two SIDs for XUSB:
		 * 1) #define TEGRA_SID_XUSB_HOST 0x1bU
		 * 2) #define TEGRA_SID_XUSB_DEV 0x1cU
		 *
		 * Four new SIDs have been added for the VFs:
		 * 3) #define TEGRA_SID_XUSB_VF0 0x5dU
		 * 4) #define TEGRA_SID_XUSB_VF1 0x5eU
		 * 5) #define TEGRA_SID_XUSB_VF2 0x5fU
		 * 6) #define TEGRA_SID_XUSB_VF3 0x60U
		 *
		 * When virtualization is enabled, the SID override must be
		 * disabled and the SIDs above programmed into the new SID
		 * registers in the XUSB PADCTL MMIO space. These registers
		 * are TZ protected, so this must be done in ATF.
		 *
		 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
		 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0 (0x139cU)
		 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
		 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
		 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
		 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
		 *
		 * This disables the SID override and programs the XUSB SIDs
		 * in the registers above to support both virtualization and
		 * non-virtualization platforms.
		 */
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);

		/*
		 * Reset the power state info for the last core doing SC7
		 * entry and exit. We set CC7 and SC7 as the deepest power
		 * states for SC7 entry, which may not be what the non-secure
		 * software controlling idle states requested.
		 */
	}

	return PSCI_E_SUCCESS;
}

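/*
 * CPU_OFF handler - disable Denver DCO operations and request core
 * power-down (C7) from the MCE.
 */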
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
	int32_t ret = 0;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	assert(ret == 0);

	return PSCI_E_SUCCESS;
}

__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */
	mce_system_shutdown();

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

int32_t tegra_soc_prepare_system_reset(void)
{
	/* System reboot */
	mce_system_reboot();

	return PSCI_E_SUCCESS;
}