/*
 * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <lib/psci/psci.h>

#include <platform.h>
#include <platform_def.h>
#include <qti_cpu.h>
#include <qti_plat.h>
#include <qtiseclib_cb_interface.h>
#include <qtiseclib_defs_plat.h>
#include <qtiseclib_interface.h>

#define QTI_LOCAL_PSTATE_WIDTH		4
#define QTI_LOCAL_PSTATE_MASK		((1 << QTI_LOCAL_PSTATE_WIDTH) - 1)

/* Make composite power state parameter till level 0 */
#define qti_make_pwrstate_lvl0(lvl0_state, type) \
	(((lvl0_state) << PSTATE_ID_SHIFT) | ((type) << PSTATE_TYPE_SHIFT))

/* Make composite power state parameter till level 1 */
#define qti_make_pwrstate_lvl1(lvl1_state, lvl0_state, type) \
	(((lvl1_state) << QTI_LOCAL_PSTATE_WIDTH) | \
	 qti_make_pwrstate_lvl0(lvl0_state, type))

/* Make composite power state parameter till level 2 */
#define qti_make_pwrstate_lvl2(lvl2_state, lvl1_state, lvl0_state, type) \
	(((lvl2_state) << (QTI_LOCAL_PSTATE_WIDTH * 2)) | \
	 qti_make_pwrstate_lvl1(lvl1_state, lvl0_state, type))

/* Make composite power state parameter till level 3 */
#define qti_make_pwrstate_lvl3(lvl3_state, lvl2_state, lvl1_state, lvl0_state, type) \
	(((lvl3_state) << (QTI_LOCAL_PSTATE_WIDTH * 3)) | \
	 qti_make_pwrstate_lvl2(lvl2_state, lvl1_state, lvl0_state, type))
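
/*
 * Note on the encoding above (illustrative only): each power level
 * contributes a QTI_LOCAL_PSTATE_WIDTH-bit local state to the composite
 * value - level 0 shifted by PSTATE_ID_SHIFT, level N by
 * N * QTI_LOCAL_PSTATE_WIDTH bits - and the state type (e.g.
 * PSTATE_TYPE_POWERDOWN) is ORed in at PSTATE_TYPE_SHIFT. The shift values
 * themselves come from the generic PSCI headers.
 */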

/* QTI_CORE_PWRDN_EN_MASK happens to be the same across all CPUs. */
#define QTI_CORE_PWRDN_EN_MASK		1

/*
 * The CPU power control register happens to be the same across all CPUs;
 * generate read/write accessors for it.
 */
_DEFINE_SYSREG_WRITE_FUNC(cpu_pwrctrl_val, S3_0_C15_C2_7)
_DEFINE_SYSREG_READ_FUNC(cpu_pwrctrl_val, S3_0_C15_C2_7)

const unsigned int qti_pm_idle_states[] = {
	qti_make_pwrstate_lvl0(QTI_LOCAL_STATE_OFF,
			       PSTATE_TYPE_POWERDOWN),
	qti_make_pwrstate_lvl0(QTI_LOCAL_STATE_DEEPOFF,
			       PSTATE_TYPE_POWERDOWN),
	qti_make_pwrstate_lvl1(QTI_LOCAL_STATE_DEEPOFF,
			       QTI_LOCAL_STATE_DEEPOFF,
			       PSTATE_TYPE_POWERDOWN),
	qti_make_pwrstate_lvl2(QTI_LOCAL_STATE_OFF,
			       QTI_LOCAL_STATE_DEEPOFF,
			       QTI_LOCAL_STATE_DEEPOFF,
			       PSTATE_TYPE_POWERDOWN),
	qti_make_pwrstate_lvl3(QTI_LOCAL_STATE_OFF,
			       QTI_LOCAL_STATE_DEEPOFF,
			       QTI_LOCAL_STATE_DEEPOFF,
			       QTI_LOCAL_STATE_DEEPOFF,
			       PSTATE_TYPE_POWERDOWN),
	/* Sentinel entry; the array must be zero-terminated. */
	0,
};

/*******************************************************************************
 * QTI standard platform handler called to check the validity of the power
 * state parameter. The power state parameter has to be a composite power
 * state.
 ******************************************************************************/
int qti_validate_power_state(unsigned int power_state,
			     psci_power_state_t *req_state)
{
	unsigned int state_id;
	int i;

	assert(req_state);

	/*
	 * Currently we use a linear search to find the matching entry in the
	 * idle power state array. This can be made a binary search if the
	 * number of entries justifies the additional complexity.
	 */
	for (i = 0; !!qti_pm_idle_states[i]; i++) {
		if (power_state == qti_pm_idle_states[i])
			break;
	}

	/* Return error if entry not found in the idle state array */
	if (!qti_pm_idle_states[i])
		return PSCI_E_INVALID_PARAMS;

	i = 0;
	state_id = psci_get_pstate_id(power_state);

	/* Parse the State ID and populate the state info parameter */
	while (state_id) {
		req_state->pwr_domain_state[i++] = state_id &
						   QTI_LOCAL_PSTATE_MASK;
		state_id >>= QTI_LOCAL_PSTATE_WIDTH;
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * PLATFORM FUNCTIONS
 ******************************************************************************/

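/*
 * Set the core power-down enable bit (QTI_CORE_PWRDN_EN_MASK) in the
 * implementation-defined CPU power control register so that the core can be
 * powered down on the next WFI.
 */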
static void qti_set_cpupwrctlr_val(void)
{
	unsigned long val;

	val = read_cpu_pwrctrl_val();
	val |= QTI_CORE_PWRDN_EN_MASK;
	write_cpu_pwrctrl_val(val);

	isb();
}

/**
 * CPU power-on handler. Ideally this would sit behind a target-specific
 * wrapper, since the behaviour is target specific, but it is kept common
 * for now to unblock dependent teams.
 */
static int qti_cpu_power_on(u_register_t mpidr)
{
	int core_pos = plat_core_pos_by_mpidr(mpidr);

	/* If not valid mpidr, return error */
	if (core_pos < 0 || core_pos >= QTISECLIB_PLAT_CORE_COUNT) {
		return PSCI_E_INVALID_PARAMS;
	}

	return qtiseclib_psci_node_power_on(mpidr);
}

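/*
 * Return true when the core-level (level 0) entry of the target state is a
 * power-down state (OFF or DEEPOFF).
 */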
static bool is_cpu_off(const psci_power_state_t *target_state)
{
	if ((target_state->pwr_domain_state[QTI_PWR_LVL0] ==
	     QTI_LOCAL_STATE_OFF) ||
	    (target_state->pwr_domain_state[QTI_PWR_LVL0] ==
	     QTI_LOCAL_STATE_DEEPOFF)) {
		return true;
	} else {
		return false;
	}
}

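/*
 * Power-on finish handler: pass the composite target state to qtiseclib and
 * re-enable the GIC CPU interface if the core itself was powered down.
 */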
static void qti_cpu_power_on_finish(const psci_power_state_t *target_state)
{
	const uint8_t *pwr_states =
	    (const uint8_t *)target_state->pwr_domain_state;
	qtiseclib_psci_node_on_finish(pwr_states);

	if (is_cpu_off(target_state)) {
		plat_qti_gic_cpuif_enable();
	}
}

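/* cpu_standby handler: no platform-specific action is needed at present. */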
static void qti_cpu_standby(plat_local_state_t cpu_state)
{
}

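/*
 * Power-off handler: notify qtiseclib of the target state and, if the core
 * itself is being powered down, disable its GIC CPU interface and set the
 * core power-down control bit.
 */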
static void qti_node_power_off(const psci_power_state_t *target_state)
{
	qtiseclib_psci_node_power_off((const uint8_t *)
				      target_state->pwr_domain_state);
	if (is_cpu_off(target_state)) {
		plat_qti_gic_cpuif_disable();
		qti_set_cpupwrctlr_val();
	}
}

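/*
 * Suspend handler: same sequence as power-off, but through the qtiseclib
 * suspend path.
 */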
static void qti_node_suspend(const psci_power_state_t *target_state)
{
	qtiseclib_psci_node_suspend((const uint8_t *)
				    target_state->pwr_domain_state);
	if (is_cpu_off(target_state)) {
		plat_qti_gic_cpuif_disable();
		qti_set_cpupwrctlr_val();
	}
}

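/*
 * Suspend finish handler: notify qtiseclib and re-enable the GIC CPU
 * interface if the core was powered down for the suspend.
 */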
static void qti_node_suspend_finish(const psci_power_state_t *target_state)
{
	const uint8_t *pwr_states =
	    (const uint8_t *)target_state->pwr_domain_state;
	qtiseclib_psci_node_suspend_finish(pwr_states);
	if (is_cpu_off(target_state)) {
		plat_qti_gic_cpuif_enable();
	}
}

__dead2 void qti_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	/* For now just do WFI - add any target specific handling if needed */
	psci_power_down_wfi();
	/* We should never reach here */
}

__dead2 void qti_system_off(void)
{
	qtiseclib_psci_system_off();
}

__dead2 void qti_system_reset(void)
{
	qtiseclib_psci_system_reset();
}

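/*
 * Report the composite power state to be used for SYSTEM_SUSPEND: the
 * deepest entry in qti_pm_idle_states[].
 */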
void qti_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	int i = 0;
	unsigned int state_id, power_state;
	int size = ARRAY_SIZE(qti_pm_idle_states);

	/*
	 * Find the deepest state. The qti_pm_idle_states[] array is
	 * zero-terminated, so the real deepest state is its second to last
	 * element.
	 */
	power_state = qti_pm_idle_states[size - 2];
	state_id = psci_get_pstate_id(power_state);

	/* Parse the State ID and populate the state info parameter */
	while (state_id) {
		req_state->pwr_domain_state[i++] =
		    state_id & QTI_LOCAL_PSTATE_MASK;
		state_id >>= QTI_LOCAL_PSTATE_WIDTH;
	}
}

/*
 * Structure containing platform specific PSCI operations. Common
 * PSCI layer will use this.
 */
const plat_psci_ops_t plat_qti_psci_pm_ops = {
	.pwr_domain_on = qti_cpu_power_on,
	.pwr_domain_on_finish = qti_cpu_power_on_finish,
	.cpu_standby = qti_cpu_standby,
	.pwr_domain_off = qti_node_power_off,
	.pwr_domain_suspend = qti_node_suspend,
	.pwr_domain_suspend_finish = qti_node_suspend_finish,
	.pwr_domain_pwr_down_wfi = qti_domain_power_down_wfi,
	.system_off = qti_system_off,
	.system_reset = qti_system_reset,
	.get_node_hw_state = NULL,
	.translate_power_state_by_mpidr = NULL,
	.get_sys_suspend_power_state = qti_get_sys_suspend_power_state,
	.validate_power_state = qti_validate_power_state,
};

/**
 * The QTI Standard platform definition of platform porting API
 * `plat_setup_psci_ops`.
 */
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	int err;

	err = qtiseclib_psci_init((uintptr_t)bl31_warm_entrypoint);
	if (err == PSCI_E_SUCCESS) {
		*psci_ops = &plat_qti_psci_pm_ops;
	}

	return err;
}