Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 1 | /* |
Varun Wadekar | 4edc17c | 2017-11-20 17:14:47 -0800 | [diff] [blame] | 2 | * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 3 | * |
| 4 | * SPDX-License-Identifier: BSD-3-Clause |
| 5 | */ |
| 6 | |
| 7 | #include <arch.h> |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 8 | #include <assert.h> |
Steven Kao | 530b217 | 2017-06-23 16:18:58 +0800 | [diff] [blame] | 9 | #include <stdbool.h> |
| 10 | #include <string.h> |
| 11 | |
| 12 | #include <arch_helpers.h> |
Jeetesh Burman | dbcc95c | 2018-07-06 20:03:38 +0530 | [diff] [blame] | 13 | #include <bpmp_ipc.h> |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 14 | #include <common/bl_common.h> |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 15 | #include <common/debug.h> |
Steven Kao | 530b217 | 2017-06-23 16:18:58 +0800 | [diff] [blame] | 16 | #include <context.h> |
Jeetesh Burman | dbcc95c | 2018-07-06 20:03:38 +0530 | [diff] [blame] | 17 | #include <drivers/delay_timer.h> |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 18 | #include <denver.h> |
Steven Kao | 530b217 | 2017-06-23 16:18:58 +0800 | [diff] [blame] | 19 | #include <lib/el3_runtime/context_mgmt.h> |
| 20 | #include <lib/psci/psci.h> |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 21 | #include <mce.h> |
Dilan Lee | 4e7a63c | 2017-08-10 16:01:42 +0800 | [diff] [blame] | 22 | #include <mce_private.h> |
Pritesh Raithatha | 75c9443 | 2018-08-03 15:48:15 +0530 | [diff] [blame] | 23 | #include <memctrl_v2.h> |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 24 | #include <plat/common/platform.h> |
Steven Kao | 530b217 | 2017-06-23 16:18:58 +0800 | [diff] [blame] | 25 | #include <se.h> |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 26 | #include <smmu.h> |
Tejal Kudav | 153ba22 | 2017-02-14 18:02:04 -0800 | [diff] [blame] | 27 | #include <t194_nvg.h> |
Varun Wadekar | e0c222f | 2017-11-10 13:23:34 -0800 | [diff] [blame] | 28 | #include <tegra194_private.h> |
Steven Kao | 530b217 | 2017-06-23 16:18:58 +0800 | [diff] [blame] | 29 | #include <tegra_platform.h> |
| 30 | #include <tegra_private.h> |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 31 | |
/*
 * Symbols exported by the Tegra194 CPU reset handler blob (defined
 * elsewhere); used to locate the handler data copied to TZDRAM.
 */
extern uint32_t __tegra194_cpu_reset_handler_data,
		__tegra194_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA194_SMMU_CTX_OFFSET	16U

/* state id mask */
#define TEGRA194_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180cU

/*
 * Per-CPU scratch data. Aligned to the cache writeback granule so each
 * core can clean its own entry to DRAM without touching a neighbour's.
 */
static struct t19x_psci_percpu_data {
	uint32_t wake_time;	/* requested wake time, in TSC ticks */
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 49 | |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 50 | int32_t tegra_soc_validate_power_state(uint32_t power_state, |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 51 | psci_power_state_t *req_state) |
| 52 | { |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 53 | uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & |
Varun Wadekar | 362a6b2 | 2017-11-10 11:04:42 -0800 | [diff] [blame] | 54 | TEGRA194_STATE_ID_MASK; |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 55 | uint32_t cpu = plat_my_core_pos(); |
| 56 | int32_t ret = PSCI_E_SUCCESS; |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 57 | |
| 58 | /* save the core wake time (in TSC ticks)*/ |
Varun Wadekar | 362a6b2 | 2017-11-10 11:04:42 -0800 | [diff] [blame] | 59 | t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK) |
| 60 | << TEGRA194_WAKE_TIME_SHIFT; |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 61 | |
| 62 | /* |
Varun Wadekar | 56c6459 | 2019-12-03 08:50:57 -0800 | [diff] [blame] | 63 | * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure |
| 64 | * that the correct value is read in tegra_soc_pwr_domain_suspend(), |
| 65 | * which is called with caches disabled. It is possible to read a stale |
| 66 | * value from DRAM in that function, because the L2 cache is not flushed |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 67 | * unless the cluster is entering CC6/CC7. |
| 68 | */ |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 69 | clean_dcache_range((uint64_t)&t19x_percpu_data[cpu], |
| 70 | sizeof(t19x_percpu_data[cpu])); |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 71 | |
| 72 | /* Sanity check the requested state id */ |
| 73 | switch (state_id) { |
| 74 | case PSTATE_ID_CORE_IDLE: |
Varun Wadekar | c61094b | 2017-12-27 18:01:59 -0800 | [diff] [blame] | 75 | |
| 76 | /* Core idle request */ |
| 77 | req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE; |
| 78 | req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN; |
| 79 | break; |
| 80 | |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 81 | case PSTATE_ID_CORE_POWERDN: |
| 82 | |
| 83 | /* Core powerdown request */ |
| 84 | req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id; |
| 85 | req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id; |
| 86 | |
| 87 | break; |
| 88 | |
| 89 | default: |
| 90 | ERROR("%s: unsupported state id (%d)\n", __func__, state_id); |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 91 | ret = PSCI_E_INVALID_PARAMS; |
| 92 | break; |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 93 | } |
| 94 | |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 95 | return ret; |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 96 | } |
| 97 | |
Varun Wadekar | c61094b | 2017-12-27 18:01:59 -0800 | [diff] [blame] | 98 | int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state) |
| 99 | { |
| 100 | uint32_t cpu = plat_my_core_pos(); |
| 101 | mce_cstate_info_t cstate_info = { 0 }; |
| 102 | |
| 103 | /* Program default wake mask */ |
| 104 | cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK; |
| 105 | cstate_info.update_wake_mask = 1; |
| 106 | mce_update_cstate_info(&cstate_info); |
| 107 | |
| 108 | /* Enter CPU idle */ |
| 109 | (void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, |
| 110 | (uint64_t)TEGRA_NVG_CORE_C6, |
| 111 | t19x_percpu_data[cpu].wake_time, |
| 112 | 0U); |
| 113 | |
| 114 | return PSCI_E_SUCCESS; |
| 115 | } |
| 116 | |
/*
 * Handler for CPU_SUSPEND entry. Depending on the composite state id this
 * either powers down the calling core (C7), or drives the SoC towards
 * system suspend (SC7), saving boot-config, MC and SE context on the way.
 * NOTE(review): the statement order in the SC7 path (context saves before
 * the MCE handshake) appears deliberate — do not reorder.
 */
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t mc_ctx_base;
	uint32_t val;
	/* SC7 entry parameters: cluster CC6, ccplex CG7, system SC7 (forced) */
	mce_cstate_info_t sc7_cstate_info = {
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
		.ccplex = (uint32_t)TEGRA_NVG_CG_CG7,
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
	};
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = 0;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA194_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;

	if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {

		/* Enter CPU powerdown (C7) with the previously saved wake time */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
					  (uint64_t)TEGRA_NVG_CORE_C7,
					  t19x_percpu_data[cpu].wake_time,
					  0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save MC context to its TZDRAM slot */
		mc_ctx_base = params_from_bl2->tzdram_base +
			      tegra194_get_mc_ctx_offset();
		tegra_mc_save_context((uintptr_t)mc_ctx_base);

		/*
		 * Suspend SE, RNG1 and PKA1 only on silicon and fpga,
		 * since VDK does not support atomic se ctx save
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

		/* Prepare for system suspend */
		mce_update_cstate_info(&sc7_cstate_info);

		/* Busy-wait until the MCE reports that SC7 entry is allowed */
		do {
			val = (uint32_t)mce_command_handler(
					(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint32_t)TEGRA_NVG_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		ret = mce_command_handler(
				(uint64_t)MCE_CMD_ENTER_CSTATE,
				(uint64_t)TEGRA_NVG_CORE_C7,
				MCE_CORE_SLEEP_TIME_INFINITE,
				0U);
		assert(ret == 0);

		/* set system suspend state for house-keeping */
		tegra194_set_system_suspend_entry();
	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}
| 196 | |
| 197 | /******************************************************************************* |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 198 | * Helper function to check if this is the last ON CPU in the cluster |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 199 | ******************************************************************************/ |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 200 | static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states, |
| 201 | uint32_t ncpu) |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 202 | { |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 203 | plat_local_state_t target; |
| 204 | bool last_on_cpu = true; |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 205 | uint32_t num_cpus = ncpu, pos = 0; |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 206 | |
| 207 | do { |
| 208 | target = states[pos]; |
| 209 | if (target != PLAT_MAX_OFF_STATE) { |
| 210 | last_on_cpu = false; |
| 211 | } |
| 212 | --num_cpus; |
| 213 | pos++; |
| 214 | } while (num_cpus != 0U); |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 215 | |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 216 | return last_on_cpu; |
| 217 | } |
| 218 | |
| 219 | /******************************************************************************* |
| 220 | * Helper function to get target power state for the cluster |
| 221 | ******************************************************************************/ |
| 222 | static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states, |
| 223 | uint32_t ncpu) |
| 224 | { |
| 225 | uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK; |
| 226 | plat_local_state_t target = states[core_pos]; |
| 227 | mce_cstate_info_t cstate_info = { 0 }; |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 228 | |
| 229 | /* CPU suspend */ |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 230 | if (target == PSTATE_ID_CORE_POWERDN) { |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 231 | |
| 232 | /* Program default wake mask */ |
Krishna Sitaraman | c64afeb | 2017-01-23 16:15:44 -0800 | [diff] [blame] | 233 | cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK; |
| 234 | cstate_info.update_wake_mask = 1; |
| 235 | mce_update_cstate_info(&cstate_info); |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 236 | } |
| 237 | |
| 238 | /* CPU off */ |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 239 | if (target == PLAT_MAX_OFF_STATE) { |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 240 | |
| 241 | /* Enable cluster powerdn from last CPU in the cluster */ |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 242 | if (tegra_last_on_cpu_in_cluster(states, ncpu)) { |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 243 | |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 244 | /* Enable CC6 state and turn off wake mask */ |
| 245 | cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6; |
Vignesh Radhakrishnan | 90d8019 | 2017-12-27 21:04:49 -0800 | [diff] [blame] | 246 | cstate_info.ccplex = (uint32_t)TEGRA_NVG_CG_CG7; |
| 247 | cstate_info.system_state_force = 1; |
Krishna Sitaraman | 74813f9 | 2017-07-14 13:51:44 -0700 | [diff] [blame] | 248 | cstate_info.update_wake_mask = 1U; |
| 249 | mce_update_cstate_info(&cstate_info); |
| 250 | |
| 251 | } else { |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 252 | |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 253 | /* Turn off wake_mask */ |
Krishna Sitaraman | 74813f9 | 2017-07-14 13:51:44 -0700 | [diff] [blame] | 254 | cstate_info.update_wake_mask = 1U; |
| 255 | mce_update_cstate_info(&cstate_info); |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 256 | target = PSCI_LOCAL_STATE_RUN; |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 257 | } |
| 258 | } |
| 259 | |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 260 | return target; |
| 261 | } |
| 262 | |
| 263 | /******************************************************************************* |
| 264 | * Platform handler to calculate the proper target power level at the |
| 265 | * specified affinity level |
| 266 | ******************************************************************************/ |
| 267 | plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl, |
| 268 | const plat_local_state_t *states, |
| 269 | uint32_t ncpu) |
| 270 | { |
| 271 | plat_local_state_t target = PSCI_LOCAL_STATE_RUN; |
| 272 | uint32_t cpu = plat_my_core_pos(); |
| 273 | |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 274 | /* System Suspend */ |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 275 | if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) { |
| 276 | target = PSTATE_ID_SOC_POWERDN; |
| 277 | } |
| 278 | |
| 279 | /* CPU off, CPU suspend */ |
| 280 | if (lvl == (uint32_t)MPIDR_AFFLVL1) { |
| 281 | target = tegra_get_afflvl1_pwr_state(states, ncpu); |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 282 | } |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 283 | |
Varun Wadekar | 0723bb6 | 2017-10-16 15:57:17 -0700 | [diff] [blame] | 284 | /* target cluster/system state */ |
| 285 | return target; |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 286 | } |
| 287 | |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 288 | int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 289 | { |
| 290 | const plat_local_state_t *pwr_domain_state = |
| 291 | target_state->pwr_domain_state; |
| 292 | plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 293 | uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & |
Varun Wadekar | 362a6b2 | 2017-11-10 11:04:42 -0800 | [diff] [blame] | 294 | TEGRA194_STATE_ID_MASK; |
Jeetesh Burman | dbcc95c | 2018-07-06 20:03:38 +0530 | [diff] [blame] | 295 | uint64_t src_len_in_bytes = (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE; |
Steven Kao | 55c2ce7 | 2016-12-23 15:51:32 +0800 | [diff] [blame] | 296 | uint64_t val; |
Jeetesh Burman | dbcc95c | 2018-07-06 20:03:38 +0530 | [diff] [blame] | 297 | int32_t ret = PSCI_E_SUCCESS; |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 298 | |
| 299 | if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { |
Jeetesh Burman | dbcc95c | 2018-07-06 20:03:38 +0530 | [diff] [blame] | 300 | val = params_from_bl2->tzdram_base + |
| 301 | tegra194_get_cpu_reset_handler_size(); |
| 302 | |
| 303 | /* initialise communication channel with BPMP */ |
| 304 | ret = tegra_bpmp_ipc_init(); |
| 305 | assert(ret == 0); |
| 306 | |
| 307 | /* Enable SE clock before SE context save */ |
Varun Wadekar | e55c27b | 2018-09-13 08:47:43 -0700 | [diff] [blame] | 308 | ret = tegra_bpmp_ipc_enable_clock(TEGRA194_CLK_SE); |
Jeetesh Burman | dbcc95c | 2018-07-06 20:03:38 +0530 | [diff] [blame] | 309 | assert(ret == 0); |
| 310 | |
| 311 | /* |
| 312 | * It is very unlikely that the BL31 image would be |
| 313 | * bigger than 2^32 bytes |
| 314 | */ |
| 315 | assert(src_len_in_bytes < UINT32_MAX); |
| 316 | |
| 317 | if (tegra_se_calculate_save_sha256(BL31_BASE, |
| 318 | (uint32_t)src_len_in_bytes) != 0) { |
| 319 | ERROR("Hash calculation failed. Reboot\n"); |
| 320 | (void)tegra_soc_prepare_system_reset(); |
| 321 | } |
| 322 | |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 323 | /* |
| 324 | * The TZRAM loses power when we enter system suspend. To |
| 325 | * allow graceful exit from system suspend, we need to copy |
| 326 | * BL3-1 over to TZDRAM. |
| 327 | */ |
| 328 | val = params_from_bl2->tzdram_base + |
Varun Wadekar | e0c222f | 2017-11-10 13:23:34 -0800 | [diff] [blame] | 329 | tegra194_get_cpu_reset_handler_size(); |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 330 | memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE, |
Jeetesh Burman | dbcc95c | 2018-07-06 20:03:38 +0530 | [diff] [blame] | 331 | src_len_in_bytes); |
| 332 | |
| 333 | /* Disable SE clock after SE context save */ |
Varun Wadekar | e55c27b | 2018-09-13 08:47:43 -0700 | [diff] [blame] | 334 | ret = tegra_bpmp_ipc_disable_clock(TEGRA194_CLK_SE); |
Jeetesh Burman | dbcc95c | 2018-07-06 20:03:38 +0530 | [diff] [blame] | 335 | assert(ret == 0); |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 336 | } |
| 337 | |
Jeetesh Burman | dbcc95c | 2018-07-06 20:03:38 +0530 | [diff] [blame] | 338 | return ret; |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 339 | } |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 340 | |
Varun Wadekar | b5b15b2 | 2018-05-17 10:10:25 -0700 | [diff] [blame] | 341 | int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state) |
| 342 | { |
| 343 | return PSCI_E_NOT_SUPPORTED; |
| 344 | } |
| 345 | |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 346 | int32_t tegra_soc_pwr_domain_on(u_register_t mpidr) |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 347 | { |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 348 | uint64_t target_cpu = mpidr & MPIDR_CPU_MASK; |
| 349 | uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >> |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 350 | MPIDR_AFFINITY_BITS; |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 351 | int32_t ret = 0; |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 352 | |
Varun Wadekar | a4e0a81 | 2017-10-17 10:53:33 -0700 | [diff] [blame] | 353 | if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) { |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 354 | ERROR("%s: unsupported CPU (0x%lx)\n", __func__ , mpidr); |
| 355 | return PSCI_E_NOT_PRESENT; |
| 356 | } |
| 357 | |
| 358 | /* construct the target CPU # */ |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 359 | target_cpu += (target_cluster << 1U); |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 360 | |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 361 | ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U); |
| 362 | if (ret < 0) { |
| 363 | return PSCI_E_DENIED; |
| 364 | } |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 365 | |
| 366 | return PSCI_E_SUCCESS; |
| 367 | } |
| 368 | |
/*
 * Completion hook after a core has been powered on. Resets the saved
 * NS-world power-state field, restores SMMU/SE and XUSB STREAMID state
 * when returning from system suspend, and optionally enables CCPLEX
 * lock-step ("dual execution") at all ELs per the BL2 platform params.
 */
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t enable_ccplex_lock_step = params_from_bl2->enable_ccplex_lock_step;
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	uint64_t actlr_elx;

	/*
	 * Reset power state info for CPUs when onlining, we set
	 * deepest power when offlining a core but that may not be
	 * requested by non-secure sw which controls idle states. It
	 * will re-init this info from non-secure software when the
	 * core come online.
	 */
	actlr_elx = read_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1));
	actlr_elx &= ~DENVER_CPU_PMSTATE_MASK;
	actlr_elx |= DENVER_CPU_PMSTATE_C1;
	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

#if ENABLE_STRICT_CHECKING_MODE
		/*
		 * Enable strict checking after programming the GSC for
		 * enabling TZSRAM and TZDRAM
		 */
		mce_enable_strict_checking();
#endif

		/* Init SMMU */
		tegra_smmu_init();

		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

		/*
		 * Program XUSB STREAMIDs
		 * ======================
		 * T19x XUSB has support for XUSB virtualization. It will
		 * have one physical function (PF) and four Virtual functions
		 * (VF)
		 *
		 * There were below two SIDs for XUSB until T186.
		 * 1) #define TEGRA_SID_XUSB_HOST    0x1bU
		 * 2) #define TEGRA_SID_XUSB_DEV    0x1cU
		 *
		 * We have below four new SIDs added for VF(s)
		 * 3) #define TEGRA_SID_XUSB_VF0    0x5dU
		 * 4) #define TEGRA_SID_XUSB_VF1    0x5eU
		 * 5) #define TEGRA_SID_XUSB_VF2    0x5fU
		 * 6) #define TEGRA_SID_XUSB_VF3    0x60U
		 *
		 * When virtualization is enabled then we have to disable SID
		 * override and program above SIDs in below newly added SID
		 * registers in XUSB PADCTL MMIO space. These registers are
		 * TZ protected and so need to be done in ATF.
		 *
		 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
		 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0  (0x139cU)
		 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
		 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
		 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
		 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
		 *
		 * This change disables SID override and programs XUSB SIDs
		 * in above registers to support both virtualization and
		 * non-virtualization platforms
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {

			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);
		}
	}

	/*
	 * Enable dual execution optimized translations for all ELx.
	 */
	if (enable_ccplex_lock_step != 0U) {
		actlr_elx = read_actlr_el3();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL3;
		write_actlr_el3(actlr_elx);

		actlr_elx = read_actlr_el2();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL2;
		write_actlr_el2(actlr_elx);

		actlr_elx = read_actlr_el1();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL1;
		write_actlr_el1(actlr_elx);
	}

	return PSCI_E_SUCCESS;
}
| 478 | |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 479 | int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state) |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 480 | { |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 481 | uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; |
Krishna Sitaraman | 74813f9 | 2017-07-14 13:51:44 -0700 | [diff] [blame] | 482 | int32_t ret = 0; |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 483 | |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 484 | (void)target_state; |
| 485 | |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 486 | /* Disable Denver's DCO operations */ |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 487 | if (impl == DENVER_IMPL) { |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 488 | denver_disable_dco(); |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 489 | } |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 490 | |
| 491 | /* Turn off CPU */ |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 492 | ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, |
| 493 | (uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U); |
Krishna Sitaraman | 74813f9 | 2017-07-14 13:51:44 -0700 | [diff] [blame] | 494 | assert(ret == 0); |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 495 | |
| 496 | return PSCI_E_SUCCESS; |
| 497 | } |
| 498 | |
| 499 | __dead2 void tegra_soc_prepare_system_off(void) |
| 500 | { |
| 501 | /* System power off */ |
Vignesh Radhakrishnan | 2aaa41c | 2017-06-14 09:59:27 -0700 | [diff] [blame] | 502 | mce_system_shutdown(); |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 503 | |
| 504 | wfi(); |
| 505 | |
| 506 | /* wait for the system to power down */ |
| 507 | for (;;) { |
| 508 | ; |
| 509 | } |
| 510 | } |
| 511 | |
Anthony Zhou | 8bf6d4e | 2017-09-20 17:44:43 +0800 | [diff] [blame] | 512 | int32_t tegra_soc_prepare_system_reset(void) |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 513 | { |
Vignesh Radhakrishnan | 2aaa41c | 2017-06-14 09:59:27 -0700 | [diff] [blame] | 514 | /* System reboot */ |
| 515 | mce_system_reboot(); |
| 516 | |
Varun Wadekar | ecd6a5a | 2018-04-09 17:48:58 -0700 | [diff] [blame] | 517 | return PSCI_E_SUCCESS; |
| 518 | } |