blob: 41c79190bd3ce0e98be3dcaf27be85af9620c54e [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Jayanth Dodderi Chidanand18d93792023-07-18 14:48:09 +01002 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Achin Gupta4f6ad662013-10-25 09:08:21 +01005 */
6
Antonio Nino Diaze0f90632018-12-14 00:18:21 +00007#include <assert.h>
8#include <string.h>
9
Dan Handley2bd4ef22014-04-09 13:14:54 +010010#include <arch.h>
Jayanth Dodderi Chidanand18d93792023-07-18 14:48:09 +010011#include <arch_features.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010012#include <arch_helpers.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000013#include <common/bl_common.h>
14#include <common/debug.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010015#include <context.h>
Sandeep Tripathy12030042020-08-17 20:22:13 +053016#include <drivers/delay_timer.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000017#include <lib/el3_runtime/context_mgmt.h>
Jayanth Dodderi Chidanand18d93792023-07-18 14:48:09 +010018#include <lib/extensions/spe.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000019#include <lib/utils.h>
20#include <plat/common/platform.h>
21
Dan Handley714a0d22014-04-09 13:13:04 +010022#include "psci_private.h"
Achin Gupta4f6ad662013-10-25 09:08:21 +010023
Achin Gupta607084e2014-02-09 18:24:19 +000024/*
Jeenu Viswambharan7f366602014-02-20 17:11:00 +000025 * SPD power management operations, expected to be supplied by the registered
26 * SPD on successful SP initialization
Achin Gupta607084e2014-02-09 18:24:19 +000027 */
Dan Handleye2712bc2014-04-10 15:37:22 +010028const spd_pm_ops_t *psci_spd_pm;
Achin Gupta607084e2014-02-09 18:24:19 +000029
Soby Mathew981487a2015-07-13 14:10:57 +010030/*
31 * PSCI requested local power state map. This array is used to store the local
32 * power states requested by a CPU for power levels from level 1 to
33 * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
34 * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
35 * CPU are the same.
36 *
37 * During state coordination, the platform is passed an array containing the
38 * local states requested for a particular non cpu power domain by each cpu
39 * within the domain.
40 *
41 * TODO: Dense packing of the requested states will cause cache thrashing
42 * when multiple power domains write to it. If we allocate the requested
43 * states at each power level in a cache-line aligned per-domain memory,
44 * the cache thrashing can be avoided.
45 */
46static plat_local_state_t
47 psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
48
Pankaj Gupta02c35682019-10-15 15:44:45 +053049unsigned int psci_plat_core_count;
Soby Mathew981487a2015-07-13 14:10:57 +010050
Achin Gupta4f6ad662013-10-25 09:08:21 +010051/*******************************************************************************
Soby Mathew981487a2015-07-13 14:10:57 +010052 * Arrays that hold the platform's power domain tree information for state
53 * management of power domains.
54 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
55 * which is an ancestor of a CPU power domain.
56 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
Achin Gupta4f6ad662013-10-25 09:08:21 +010057 ******************************************************************************/
Soby Mathew981487a2015-07-13 14:10:57 +010058non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
Soby Mathew2ae20432015-01-08 18:02:44 +000059#if USE_COHERENT_MEM
Chris Kay33bfc5e2023-02-14 11:30:04 +000060__section(".tzfw_coherent_mem")
Soby Mathew2ae20432015-01-08 18:02:44 +000061#endif
62;
Achin Gupta4f6ad662013-10-25 09:08:21 +010063
Jeenu Viswambharan346bfd82017-01-05 11:01:02 +000064/* Lock for PSCI state coordination */
65DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
Andrew Thoelkee466c9f2015-09-10 11:39:36 +010066
Soby Mathew981487a2015-07-13 14:10:57 +010067cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
68
Achin Gupta4f6ad662013-10-25 09:08:21 +010069/*******************************************************************************
Achin Gupta4f6ad662013-10-25 09:08:21 +010070 * Pointer to functions exported by the platform to complete power mgmt. ops
71 ******************************************************************************/
Soby Mathew981487a2015-07-13 14:10:57 +010072const plat_psci_ops_t *psci_plat_pm_ops;
Achin Gupta4f6ad662013-10-25 09:08:21 +010073
Soby Mathew981487a2015-07-13 14:10:57 +010074/******************************************************************************
75 * Check that the maximum power level supported by the platform makes sense
76 *****************************************************************************/
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +010077CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) &&
78 (PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL),
79 assert_platform_max_pwrlvl_check);
Soby Mathew2b7de2b2015-02-12 14:45:02 +000080
Wing Li71f69df2022-09-14 13:18:15 -070081#if PSCI_OS_INIT_MODE
82/*******************************************************************************
83 * The power state coordination mode used in CPU_SUSPEND.
84 * Defaults to platform-coordinated mode.
85 ******************************************************************************/
86suspend_mode_t psci_suspend_mode = PLAT_COORD;
87#endif
88
Soby Mathew981487a2015-07-13 14:10:57 +010089/*
90 * The plat_local_state used by the platform is one of these types: RUN,
91 * RETENTION and OFF. The platform can define further sub-states for each type
92 * apart from RUN. This categorization is done to verify the sanity of the
93 * psci_power_state passed by the platform and to print debug information. The
94 * categorization is done on the basis of the following conditions:
95 *
96 * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
97 *
98 * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
99 * STATE_TYPE_RETN.
100 *
101 * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
102 * STATE_TYPE_OFF.
103 */
/* Categories a plat_local_state_t value can fall into (see rules above). */
typedef enum plat_local_state_type {
	STATE_TYPE_RUN = 0,	/* plat_local_state == 0 */
	STATE_TYPE_RETN,	/* 0 < state <= PLAT_MAX_RET_STATE */
	STATE_TYPE_OFF		/* state > PLAT_MAX_RET_STATE */
} plat_local_state_type_t;
109
Antonio Nino Diaz5a42b682018-07-18 11:57:21 +0100110/* Function used to categorize plat_local_state. */
111static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
112{
113 if (state != 0U) {
114 if (state > PLAT_MAX_RET_STATE) {
115 return STATE_TYPE_OFF;
116 } else {
117 return STATE_TYPE_RETN;
118 }
119 } else {
120 return STATE_TYPE_RUN;
121 }
122}
Soby Mathew981487a2015-07-13 14:10:57 +0100123
124/******************************************************************************
125 * Check that the maximum retention level supported by the platform is less
126 * than the maximum off level.
127 *****************************************************************************/
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100128CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE,
Soby Mathew981487a2015-07-13 14:10:57 +0100129 assert_platform_max_off_and_retn_state_check);
130
131/******************************************************************************
132 * This function ensures that the power state parameter in a CPU_SUSPEND request
133 * is valid. If so, it returns the requested states for each power level.
134 *****************************************************************************/
135int psci_validate_power_state(unsigned int power_state,
136 psci_power_state_t *state_info)
Achin Guptaf6b9e992014-07-31 11:19:11 +0100137{
Soby Mathew981487a2015-07-13 14:10:57 +0100138 /* Check SBZ bits in power state are zero */
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100139 if (psci_check_power_state(power_state) != 0U)
Soby Mathew981487a2015-07-13 14:10:57 +0100140 return PSCI_E_INVALID_PARAMS;
Achin Guptaf6b9e992014-07-31 11:19:11 +0100141
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100142 assert(psci_plat_pm_ops->validate_power_state != NULL);
Achin Guptaf6b9e992014-07-31 11:19:11 +0100143
Soby Mathew981487a2015-07-13 14:10:57 +0100144 /* Validate the power_state using platform pm_ops */
145 return psci_plat_pm_ops->validate_power_state(power_state, state_info);
146}
Achin Guptaf6b9e992014-07-31 11:19:11 +0100147
Soby Mathew981487a2015-07-13 14:10:57 +0100148/******************************************************************************
149 * This function retrieves the `psci_power_state_t` for system suspend from
150 * the platform.
151 *****************************************************************************/
152void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
153{
154 /*
155 * Assert that the required pm_ops hook is implemented to ensure that
156 * the capability detected during psci_setup() is valid.
157 */
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100158 assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL);
Soby Mathew981487a2015-07-13 14:10:57 +0100159
160 /*
161 * Query the platform for the power_state required for system suspend
162 */
163 psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
Achin Guptaf6b9e992014-07-31 11:19:11 +0100164}
165
Wing Li2c556f32022-09-14 13:18:17 -0700166#if PSCI_OS_INIT_MODE
167/*******************************************************************************
168 * This function verifies that all the other cores at the 'end_pwrlvl' have been
169 * idled and the current CPU is the last running CPU at the 'end_pwrlvl'.
170 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
171 * otherwise.
172 ******************************************************************************/
173static bool psci_is_last_cpu_to_idle_at_pwrlvl(unsigned int end_pwrlvl)
174{
175 unsigned int my_idx, lvl, parent_idx;
176 unsigned int cpu_start_idx, ncpus, cpu_idx;
177 plat_local_state_t local_state;
178
179 if (end_pwrlvl == PSCI_CPU_PWR_LVL) {
180 return true;
181 }
182
183 my_idx = plat_my_core_pos();
184
185 for (lvl = PSCI_CPU_PWR_LVL; lvl <= end_pwrlvl; lvl++) {
186 parent_idx = psci_cpu_pd_nodes[my_idx].parent_node;
187 }
188
189 cpu_start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
190 ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
191
192 for (cpu_idx = cpu_start_idx; cpu_idx < cpu_start_idx + ncpus;
193 cpu_idx++) {
194 local_state = psci_get_cpu_local_state_by_idx(cpu_idx);
195 if (cpu_idx == my_idx) {
196 assert(is_local_state_run(local_state) != 0);
197 continue;
198 }
199
200 if (is_local_state_run(local_state) != 0) {
201 return false;
202 }
203 }
204
205 return true;
206}
207#endif
208
Achin Guptaf6b9e992014-07-31 11:19:11 +0100209/*******************************************************************************
Wing Li71f69df2022-09-14 13:18:15 -0700210 * This function verifies that all the other cores in the system have been
Soby Mathew96168382014-12-17 14:47:57 +0000211 * turned OFF and the current CPU is the last running CPU in the system.
Jayanth Dodderi Chidanand70763502022-08-22 23:46:10 +0100212 * Returns true, if the current CPU is the last ON CPU or false otherwise.
Soby Mathew96168382014-12-17 14:47:57 +0000213 ******************************************************************************/
Jayanth Dodderi Chidanand70763502022-08-22 23:46:10 +0100214bool psci_is_last_on_cpu(void)
Soby Mathew96168382014-12-17 14:47:57 +0000215{
Deepika Bhavnani79ffab52019-08-27 00:32:24 +0300216 unsigned int cpu_idx, my_idx = plat_my_core_pos();
Soby Mathew96168382014-12-17 14:47:57 +0000217
Jayanth Dodderi Chidanand70763502022-08-22 23:46:10 +0100218 for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) {
Soby Mathew981487a2015-07-13 14:10:57 +0100219 if (cpu_idx == my_idx) {
220 assert(psci_get_aff_info_state() == AFF_STATE_ON);
Soby Mathew96168382014-12-17 14:47:57 +0000221 continue;
222 }
223
Jayanth Dodderi Chidanand70763502022-08-22 23:46:10 +0100224 if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF) {
225 VERBOSE("core=%u other than current core=%u %s\n",
226 cpu_idx, my_idx, "running in the system");
227 return false;
228 }
Soby Mathew96168382014-12-17 14:47:57 +0000229 }
230
Jayanth Dodderi Chidanand70763502022-08-22 23:46:10 +0100231 return true;
Soby Mathew96168382014-12-17 14:47:57 +0000232}
233
234/*******************************************************************************
Wing Li71f69df2022-09-14 13:18:15 -0700235 * This function verifies that all cores in the system have been turned ON.
236 * Returns true, if all CPUs are ON or false otherwise.
237 ******************************************************************************/
238static bool psci_are_all_cpus_on(void)
239{
240 unsigned int cpu_idx;
241
242 for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) {
243 if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_OFF) {
244 return false;
245 }
246 }
247
248 return true;
249}
250
251/*******************************************************************************
Soby Mathew981487a2015-07-13 14:10:57 +0100252 * Routine to return the maximum power level to traverse to after a cpu has
253 * been physically powered up. It is expected to be called immediately after
254 * reset from assembler code.
Achin Guptaf6b9e992014-07-31 11:19:11 +0100255 ******************************************************************************/
Soby Mathew011ca182015-07-29 17:05:03 +0100256static unsigned int get_power_on_target_pwrlvl(void)
Achin Guptaf6b9e992014-07-31 11:19:11 +0100257{
Soby Mathew011ca182015-07-29 17:05:03 +0100258 unsigned int pwrlvl;
Achin Guptaf6b9e992014-07-31 11:19:11 +0100259
260 /*
Soby Mathew981487a2015-07-13 14:10:57 +0100261 * Assume that this cpu was suspended and retrieve its target power
262 * level. If it is invalid then it could only have been turned off
263 * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
264 * cpu can be turned off to.
Achin Guptaf6b9e992014-07-31 11:19:11 +0100265 */
Soby Mathew981487a2015-07-13 14:10:57 +0100266 pwrlvl = psci_get_suspend_pwrlvl();
Soby Mathew011ca182015-07-29 17:05:03 +0100267 if (pwrlvl == PSCI_INVALID_PWR_LVL)
Soby Mathew981487a2015-07-13 14:10:57 +0100268 pwrlvl = PLAT_MAX_PWR_LVL;
Deepika Bhavnani523024c2019-08-17 01:10:02 +0300269 assert(pwrlvl < PSCI_INVALID_PWR_LVL);
Soby Mathew981487a2015-07-13 14:10:57 +0100270 return pwrlvl;
Achin Guptaf6b9e992014-07-31 11:19:11 +0100271}
272
Soby Mathew981487a2015-07-13 14:10:57 +0100273/******************************************************************************
274 * Helper function to update the requested local power state array. This array
275 * does not store the requested state for the CPU power level. Hence an
Deepika Bhavnani6bd46662019-08-15 00:56:46 +0300276 * assertion is added to prevent us from accessing the CPU power level.
Soby Mathew981487a2015-07-13 14:10:57 +0100277 *****************************************************************************/
278static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
279 unsigned int cpu_idx,
280 plat_local_state_t req_pwr_state)
Achin Guptaf6b9e992014-07-31 11:19:11 +0100281{
Soby Mathew981487a2015-07-13 14:10:57 +0100282 assert(pwrlvl > PSCI_CPU_PWR_LVL);
Deepika Bhavnani6bd46662019-08-15 00:56:46 +0300283 if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
Pankaj Gupta02c35682019-10-15 15:44:45 +0530284 (cpu_idx < psci_plat_core_count)) {
Deepika Bhavnani6bd46662019-08-15 00:56:46 +0300285 psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
286 }
Achin Guptaf6b9e992014-07-31 11:19:11 +0100287}
288
Soby Mathew981487a2015-07-13 14:10:57 +0100289/******************************************************************************
290 * This function initializes the psci_req_local_pwr_states.
291 *****************************************************************************/
Daniel Boulby5753e492018-09-20 14:12:46 +0100292void __init psci_init_req_local_pwr_states(void)
Achin Guptaa45e3972013-12-05 15:10:48 +0000293{
Soby Mathew981487a2015-07-13 14:10:57 +0100294 /* Initialize the requested state of all non CPU power domains as OFF */
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100295 unsigned int pwrlvl;
Pankaj Gupta02c35682019-10-15 15:44:45 +0530296 unsigned int core;
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100297
298 for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
Pankaj Gupta02c35682019-10-15 15:44:45 +0530299 for (core = 0; core < psci_plat_core_count; core++) {
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100300 psci_req_local_pwr_states[pwrlvl][core] =
301 PLAT_MAX_OFF_STATE;
302 }
303 }
Soby Mathew981487a2015-07-13 14:10:57 +0100304}
Achin Guptaa45e3972013-12-05 15:10:48 +0000305
Soby Mathew981487a2015-07-13 14:10:57 +0100306/******************************************************************************
307 * Helper function to return a reference to an array containing the local power
308 * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
309 * array will be the number of cpu power domains of which this power domain is
310 * an ancestor. These requested states will be used to determine a suitable
311 * target state for this power domain during psci state coordination. An
312 * assertion is added to prevent us from accessing the CPU power level.
313 *****************************************************************************/
Soby Mathew011ca182015-07-29 17:05:03 +0100314static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
Deepika Bhavnani79ffab52019-08-27 00:32:24 +0300315 unsigned int cpu_idx)
Soby Mathew981487a2015-07-13 14:10:57 +0100316{
317 assert(pwrlvl > PSCI_CPU_PWR_LVL);
Achin Guptaf3ccbab2014-07-25 14:52:47 +0100318
Deepika Bhavnani6bd46662019-08-15 00:56:46 +0300319 if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
Pankaj Gupta02c35682019-10-15 15:44:45 +0530320 (cpu_idx < psci_plat_core_count)) {
Deepika Bhavnani6bd46662019-08-15 00:56:46 +0300321 return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
322 } else
323 return NULL;
Soby Mathew981487a2015-07-13 14:10:57 +0100324}
Achin Guptaa45e3972013-12-05 15:10:48 +0000325
Wing Li2c556f32022-09-14 13:18:17 -0700326#if PSCI_OS_INIT_MODE
/******************************************************************************
 * Helper function to save a copy of the psci_req_local_pwr_states (prev) for a
 * CPU (cpu_idx), and update psci_req_local_pwr_states with the new requested
 * local power states (state_info).
 *
 * 'prev' must have room for one entry per level from 1 to the maximum
 * suspend level; entry [lvl - 1] holds the saved state for level 'lvl'.
 *****************************************************************************/
void psci_update_req_local_pwr_states(unsigned int end_pwrlvl,
				      unsigned int cpu_idx,
				      psci_power_state_t *state_info,
				      plat_local_state_t *prev)
{
	unsigned int lvl;
#ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
	/* Platform caps the level a CPU_SUSPEND request may reach */
	unsigned int max_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
#else
	unsigned int max_pwrlvl = PLAT_MAX_PWR_LVL;
#endif
	plat_local_state_t req_state;

	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= max_pwrlvl; lvl++) {
		/*
		 * Save the previous requested local power state.
		 * NOTE(review): psci_get_req_local_pwr_states() can return
		 * NULL for out-of-range inputs; this dereference assumes
		 * lvl/cpu_idx are always in range here — confirm.
		 */
		prev[lvl - 1U] = *psci_get_req_local_pwr_states(lvl, cpu_idx);

		/*
		 * Update the new requested local power state: levels above
		 * end_pwrlvl inherit the request made at end_pwrlvl.
		 */
		if (lvl <= end_pwrlvl) {
			req_state = state_info->pwr_domain_state[lvl];
		} else {
			req_state = state_info->pwr_domain_state[end_pwrlvl];
		}
		psci_set_req_local_pwr_state(lvl, cpu_idx, req_state);
	}
}
358
/******************************************************************************
 * Helper function to restore the previously saved requested local power states
 * (prev) for a CPU (cpu_idx) to psci_req_local_pwr_states.
 *
 * 'prev' uses the same layout produced by psci_update_req_local_pwr_states():
 * entry [lvl - 1] holds the state for power level 'lvl'.
 *****************************************************************************/
void psci_restore_req_local_pwr_states(unsigned int cpu_idx,
				       plat_local_state_t *prev)
{
	unsigned int lvl;
#ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
	/* Platform caps the level a CPU_SUSPEND request may reach */
	unsigned int max_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
#else
	unsigned int max_pwrlvl = PLAT_MAX_PWR_LVL;
#endif

	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= max_pwrlvl; lvl++) {
		/* Restore the previous requested local power state */
		psci_set_req_local_pwr_state(lvl, cpu_idx, prev[lvl - 1U]);
	}
}
378#endif
379
Jeenu Viswambharan0b56d6f2017-01-06 14:58:11 +0000380/*
381 * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
382 * memory.
383 *
384 * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
385 * it's accessed by both cached and non-cached participants. To serve the common
386 * minimum, perform a cache flush before read and after write so that non-cached
387 * participants operate on latest data in main memory.
388 *
389 * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
390 * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
391 * In both cases, no cache operations are required.
392 */
393
/*
 * Retrieve local state of non-CPU power domain node from a non-cached CPU,
 * after any required cache maintenance operation.
 */
static plat_local_state_t get_non_cpu_pd_node_local_state(
		unsigned int parent_idx)
{
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Node lives in normal memory shared between cached and non-cached
	 * observers: flush before reading so this reader sees the latest
	 * value from main memory (see the coherency note above).
	 */
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
	return psci_non_cpu_pd_nodes[parent_idx].local_state;
}
408
/*
 * Update local state of non-CPU power domain node from a cached CPU; perform
 * any required cache maintenance operation afterwards.
 */
static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
					    plat_local_state_t state)
{
	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Flush after writing so a non-cached observer reading from main
	 * memory sees the new value (see the coherency note above).
	 */
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
}
423
Soby Mathew981487a2015-07-13 14:10:57 +0100424/******************************************************************************
425 * Helper function to return the current local power state of each power domain
426 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
427 * function will be called after a cpu is powered on to find the local state
428 * each power domain has emerged from.
429 *****************************************************************************/
Achin Gupta9b2bf252016-06-28 16:46:15 +0100430void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
431 psci_power_state_t *target_state)
Soby Mathew981487a2015-07-13 14:10:57 +0100432{
Soby Mathew011ca182015-07-29 17:05:03 +0100433 unsigned int parent_idx, lvl;
Soby Mathew981487a2015-07-13 14:10:57 +0100434 plat_local_state_t *pd_state = target_state->pwr_domain_state;
435
436 pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
437 parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
438
439 /* Copy the local power state from node to state_info */
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100440 for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
Jeenu Viswambharan0b56d6f2017-01-06 14:58:11 +0000441 pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
Soby Mathew981487a2015-07-13 14:10:57 +0100442 parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
443 }
444
445 /* Set the the higher levels to RUN */
446 for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
447 target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
448}
449
450/******************************************************************************
451 * Helper function to set the target local power state that each power domain
452 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
453 * enter. This function will be called after coordination of requested power
454 * states has been done for each power level.
455 *****************************************************************************/
Wing Lic0dc6392023-05-04 08:31:19 -0700456void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
457 const psci_power_state_t *target_state)
Soby Mathew981487a2015-07-13 14:10:57 +0100458{
Soby Mathew011ca182015-07-29 17:05:03 +0100459 unsigned int parent_idx, lvl;
Soby Mathew981487a2015-07-13 14:10:57 +0100460 const plat_local_state_t *pd_state = target_state->pwr_domain_state;
461
462 psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
Achin Guptaa45e3972013-12-05 15:10:48 +0000463
Achin Guptaf3ccbab2014-07-25 14:52:47 +0100464 /*
Jeenu Viswambharan0b56d6f2017-01-06 14:58:11 +0000465 * Need to flush as local_state might be accessed with Data Cache
Soby Mathew981487a2015-07-13 14:10:57 +0100466 * disabled during power on
Achin Guptaf3ccbab2014-07-25 14:52:47 +0100467 */
Jeenu Viswambharan0b56d6f2017-01-06 14:58:11 +0000468 psci_flush_cpu_data(psci_svc_cpu_data.local_state);
Soby Mathew981487a2015-07-13 14:10:57 +0100469
470 parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
471
472 /* Copy the local_state from state_info */
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100473 for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
Jeenu Viswambharan0b56d6f2017-01-06 14:58:11 +0000474 set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
Soby Mathew981487a2015-07-13 14:10:57 +0100475 parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
476 }
Achin Guptaa45e3972013-12-05 15:10:48 +0000477}
478
479/*******************************************************************************
Soby Mathew981487a2015-07-13 14:10:57 +0100480 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100481 ******************************************************************************/
Deepika Bhavnani79ffab52019-08-27 00:32:24 +0300482void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
Soby Mathew011ca182015-07-29 17:05:03 +0100483 unsigned int end_lvl,
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100484 unsigned int *node_index)
Soby Mathew981487a2015-07-13 14:10:57 +0100485{
486 unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
Varun Wadekar66231d12017-06-07 09:57:42 -0700487 unsigned int i;
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100488 unsigned int *node = node_index;
Soby Mathew981487a2015-07-13 14:10:57 +0100489
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100490 for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
491 *node = parent_node;
492 node++;
Soby Mathew981487a2015-07-13 14:10:57 +0100493 parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
494 }
495}
496
497/******************************************************************************
498 * This function is invoked post CPU power up and initialization. It sets the
499 * affinity info state, target power state and requested power state for the
500 * current CPU and all its ancestor power domains to RUN.
501 *****************************************************************************/
Soby Mathew011ca182015-07-29 17:05:03 +0100502void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
Soby Mathew981487a2015-07-13 14:10:57 +0100503{
Soby Mathew011ca182015-07-29 17:05:03 +0100504 unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
Soby Mathew981487a2015-07-13 14:10:57 +0100505 parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
506
507 /* Reset the local_state to RUN for the non cpu power domains. */
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100508 for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
Jeenu Viswambharan0b56d6f2017-01-06 14:58:11 +0000509 set_non_cpu_pd_node_local_state(parent_idx,
510 PSCI_LOCAL_STATE_RUN);
Soby Mathew981487a2015-07-13 14:10:57 +0100511 psci_set_req_local_pwr_state(lvl,
512 cpu_idx,
513 PSCI_LOCAL_STATE_RUN);
514 parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
515 }
516
517 /* Set the affinity info state to ON */
518 psci_set_aff_info_state(AFF_STATE_ON);
519
520 psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
Jeenu Viswambharan0b56d6f2017-01-06 14:58:11 +0000521 psci_flush_cpu_data(psci_svc_cpu_data);
Soby Mathew981487a2015-07-13 14:10:57 +0100522}
523
/******************************************************************************
 * This function is used in platform-coordinated mode.
 *
 * This function is passed the local power states requested for each power
 * domain (state_info) between the current CPU domain and its ancestors until
 * the target power level (end_pwrlvl). It updates the array of requested power
 * states with this information.
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * retrieves the states requested by all the cpus of which the power domain at
 * that level is an ancestor. It passes this information to the platform to
 * coordinate and return the target power state. If the target state for a level
 * is RUN then subsequent levels are not considered. At the CPU level, state
 * coordination is not required. Hence, the requested and the target states are
 * the same.
 *
 * The 'state_info' is updated with the target state for each level between the
 * CPU and the 'end_pwrlvl' and returned to the caller.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 *****************************************************************************/
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info)
{
	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
	unsigned int start_idx;
	unsigned int ncpus;
	plat_local_state_t target_state, *req_states;

	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* For level 0, the requested state will be equivalent
	   to target state */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {

		/* First update the requested power state */
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);

		/* Get the requested power states for this power level */
		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
		req_states = psci_get_req_local_pwr_states(lvl, start_idx);

		/*
		 * Let the platform coordinate amongst the requested states at
		 * this power level and return the target local power state.
		 */
		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
		target_state = plat_get_target_pwr_state(lvl,
							 req_states,
							 ncpus);

		state_info->pwr_domain_state[lvl] = target_state;

		/* Break early if the negotiated target power state is RUN */
		if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0)
			break;

		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/*
	 * This is for cases when we break out of the above loop early because
	 * the target power state is RUN at a power level < end_pwrlvl.
	 * We update the requested power state from state_info and then
	 * set the target state as RUN.
	 */
	for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);
		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;

	}
}
600
601#if PSCI_OS_INIT_MODE
602/******************************************************************************
603 * This function is used in OS-initiated mode.
604 *
605 * This function is passed the local power states requested for each power
606 * domain (state_info) between the current CPU domain and its ancestors until
607 * the target power level (end_pwrlvl), and ensures the requested power states
608 * are valid. It updates the array of requested power states with this
609 * information.
610 *
611 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
612 * retrieves the states requested by all the cpus of which the power domain at
613 * that level is an ancestor. It passes this information to the platform to
614 * coordinate and return the target power state. If the requested state does
615 * not match the target state, the request is denied.
616 *
617 * The 'state_info' is not modified.
618 *
619 * This function will only be invoked with data cache enabled and while
620 * powering down a core.
621 *****************************************************************************/
622int psci_validate_state_coordination(unsigned int end_pwrlvl,
623 psci_power_state_t *state_info)
624{
625 int rc = PSCI_E_SUCCESS;
626 unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
627 unsigned int start_idx;
628 unsigned int ncpus;
629 plat_local_state_t target_state, *req_states;
630 plat_local_state_t prev[PLAT_MAX_PWR_LVL];
631
632 assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
633 parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
634
635 /*
636 * Save a copy of the previous requested local power states and update
637 * the new requested local power states.
638 */
639 psci_update_req_local_pwr_states(end_pwrlvl, cpu_idx, state_info, prev);
640
641 for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
642 /* Get the requested power states for this power level */
643 start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
644 req_states = psci_get_req_local_pwr_states(lvl, start_idx);
645
646 /*
647 * Let the platform coordinate amongst the requested states at
648 * this power level and return the target local power state.
649 */
650 ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
651 target_state = plat_get_target_pwr_state(lvl,
652 req_states,
653 ncpus);
654
655 /*
656 * Verify that the requested power state matches the target
657 * local power state.
658 */
659 if (state_info->pwr_domain_state[lvl] != target_state) {
660 if (target_state == PSCI_LOCAL_STATE_RUN) {
661 rc = PSCI_E_DENIED;
662 } else {
663 rc = PSCI_E_INVALID_PARAMS;
664 }
665 goto exit;
666 }
667 }
668
669 /*
670 * Verify that the current core is the last running core at the
671 * specified power level.
672 */
673 lvl = state_info->last_at_pwrlvl;
674 if (!psci_is_last_cpu_to_idle_at_pwrlvl(lvl)) {
675 rc = PSCI_E_DENIED;
676 }
677
678exit:
679 if (rc != PSCI_E_SUCCESS) {
680 /* Restore the previous requested local power states. */
681 psci_restore_req_local_pwr_states(cpu_idx, prev);
682 return rc;
Soby Mathew981487a2015-07-13 14:10:57 +0100683 }
Achin Gupta4f6ad662013-10-25 09:08:21 +0100684
Wing Li2c556f32022-09-14 13:18:17 -0700685 return rc;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100686}
Wing Li2c556f32022-09-14 13:18:17 -0700687#endif
Achin Gupta4f6ad662013-10-25 09:08:21 +0100688
Soby Mathew981487a2015-07-13 14:10:57 +0100689/******************************************************************************
690 * This function validates a suspend request by making sure that if a standby
691 * state is requested then no power level is turned off and the highest power
692 * level is placed in a standby/retention state.
693 *
694 * It also ensures that the state level X will enter is not shallower than the
695 * state level X + 1 will enter.
696 *
697 * This validation will be enabled only for DEBUG builds as the platform is
698 * expected to perform these validations as well.
699 *****************************************************************************/
700int psci_validate_suspend_req(const psci_power_state_t *state_info,
701 unsigned int is_power_down_state)
Achin Gupta0959db52013-12-02 17:33:04 +0000702{
Soby Mathew981487a2015-07-13 14:10:57 +0100703 unsigned int max_off_lvl, target_lvl, max_retn_lvl;
704 plat_local_state_t state;
705 plat_local_state_type_t req_state_type, deepest_state_type;
706 int i;
Achin Gupta0959db52013-12-02 17:33:04 +0000707
Soby Mathew981487a2015-07-13 14:10:57 +0100708 /* Find the target suspend power level */
709 target_lvl = psci_find_target_suspend_lvl(state_info);
Soby Mathew011ca182015-07-29 17:05:03 +0100710 if (target_lvl == PSCI_INVALID_PWR_LVL)
Achin Gupta0959db52013-12-02 17:33:04 +0000711 return PSCI_E_INVALID_PARAMS;
712
Soby Mathew981487a2015-07-13 14:10:57 +0100713 /* All power domain levels are in a RUN state to begin with */
714 deepest_state_type = STATE_TYPE_RUN;
715
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100716 for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
Soby Mathew981487a2015-07-13 14:10:57 +0100717 state = state_info->pwr_domain_state[i];
718 req_state_type = find_local_state_type(state);
719
720 /*
721 * While traversing from the highest power level to the lowest,
722 * the state requested for lower levels has to be the same or
723 * deeper i.e. equal to or greater than the state at the higher
724 * levels. If this condition is true, then the requested state
725 * becomes the deepest state encountered so far.
726 */
727 if (req_state_type < deepest_state_type)
728 return PSCI_E_INVALID_PARAMS;
729 deepest_state_type = req_state_type;
730 }
731
732 /* Find the highest off power level */
733 max_off_lvl = psci_find_max_off_lvl(state_info);
734
735 /* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
Soby Mathew011ca182015-07-29 17:05:03 +0100736 max_retn_lvl = PSCI_INVALID_PWR_LVL;
Soby Mathew981487a2015-07-13 14:10:57 +0100737 if (target_lvl != max_off_lvl)
738 max_retn_lvl = target_lvl;
739
740 /*
741 * If this is not a request for a power down state then max off level
742 * has to be invalid and max retention level has to be a valid power
743 * level.
744 */
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100745 if ((is_power_down_state == 0U) &&
746 ((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
747 (max_retn_lvl == PSCI_INVALID_PWR_LVL)))
Achin Gupta0959db52013-12-02 17:33:04 +0000748 return PSCI_E_INVALID_PARAMS;
749
750 return PSCI_E_SUCCESS;
751}
752
Soby Mathew981487a2015-07-13 14:10:57 +0100753/******************************************************************************
754 * This function finds the highest power level which will be powered down
755 * amongst all the power levels specified in the 'state_info' structure
756 *****************************************************************************/
757unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
Achin Guptacab78e42014-07-28 00:09:01 +0100758{
Soby Mathew981487a2015-07-13 14:10:57 +0100759 int i;
Achin Guptacab78e42014-07-28 00:09:01 +0100760
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100761 for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
762 if (is_local_state_off(state_info->pwr_domain_state[i]) != 0)
763 return (unsigned int) i;
Soby Mathew981487a2015-07-13 14:10:57 +0100764 }
765
Soby Mathew011ca182015-07-29 17:05:03 +0100766 return PSCI_INVALID_PWR_LVL;
Soby Mathew981487a2015-07-13 14:10:57 +0100767}
768
769/******************************************************************************
770 * This functions finds the level of the highest power domain which will be
771 * placed in a low power state during a suspend operation.
772 *****************************************************************************/
773unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
774{
775 int i;
776
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100777 for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
778 if (is_local_state_run(state_info->pwr_domain_state[i]) == 0)
779 return (unsigned int) i;
Achin Guptacab78e42014-07-28 00:09:01 +0100780 }
Soby Mathew981487a2015-07-13 14:10:57 +0100781
Soby Mathew011ca182015-07-29 17:05:03 +0100782 return PSCI_INVALID_PWR_LVL;
Achin Guptacab78e42014-07-28 00:09:01 +0100783}
784
785/*******************************************************************************
Andrew F. Davis74e89782019-06-04 10:46:54 -0400786 * This function is passed the highest level in the topology tree that the
787 * operation should be applied to and a list of node indexes. It picks up locks
788 * from the node index list in order of increasing power domain level in the
789 * range specified.
Achin Gupta0959db52013-12-02 17:33:04 +0000790 ******************************************************************************/
Andrew F. Davis74e89782019-06-04 10:46:54 -0400791void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
792 const unsigned int *parent_nodes)
Achin Gupta0959db52013-12-02 17:33:04 +0000793{
Andrew F. Davis74e89782019-06-04 10:46:54 -0400794 unsigned int parent_idx;
Soby Mathew011ca182015-07-29 17:05:03 +0100795 unsigned int level;
Achin Gupta0959db52013-12-02 17:33:04 +0000796
Soby Mathew981487a2015-07-13 14:10:57 +0100797 /* No locking required for level 0. Hence start locking from level 1 */
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100798 for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
Andrew F. Davis74e89782019-06-04 10:46:54 -0400799 parent_idx = parent_nodes[level - 1U];
Soby Mathew981487a2015-07-13 14:10:57 +0100800 psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
Achin Gupta0959db52013-12-02 17:33:04 +0000801 }
802}
803
804/*******************************************************************************
Andrew F. Davis74e89782019-06-04 10:46:54 -0400805 * This function is passed the highest level in the topology tree that the
806 * operation should be applied to and a list of node indexes. It releases the
807 * locks in order of decreasing power domain level in the range specified.
Achin Gupta0959db52013-12-02 17:33:04 +0000808 ******************************************************************************/
Andrew F. Davis74e89782019-06-04 10:46:54 -0400809void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
810 const unsigned int *parent_nodes)
Achin Gupta0959db52013-12-02 17:33:04 +0000811{
Andrew F. Davis74e89782019-06-04 10:46:54 -0400812 unsigned int parent_idx;
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100813 unsigned int level;
Achin Gupta0959db52013-12-02 17:33:04 +0000814
Soby Mathew981487a2015-07-13 14:10:57 +0100815 /* Unlock top down. No unlocking required for level 0. */
Zelalem91d80612020-02-12 10:37:03 -0600816 for (level = end_pwrlvl; level >= (PSCI_CPU_PWR_LVL + 1U); level--) {
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100817 parent_idx = parent_nodes[level - 1U];
Soby Mathew981487a2015-07-13 14:10:57 +0100818 psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
Achin Gupta0959db52013-12-02 17:33:04 +0000819 }
820}
821
822/*******************************************************************************
Andrew Thoelke4e126072014-06-04 21:10:52 +0100823 * This function determines the full entrypoint information for the requested
Soby Mathew8595b872015-01-06 15:36:38 +0000824 * PSCI entrypoint on power on/resume and returns it.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100825 ******************************************************************************/
Julius Werner8e0ef0f2019-07-09 14:02:43 -0700826#ifdef __aarch64__
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	/*
	 * AArch64 variant: build the non-secure entry point info (PC, SPSR,
	 * arg0) for a CPU being turned on or resumed, based on the current
	 * non-secure SCR_EL3/SCTLR configuration.
	 * Returns PSCI_E_SUCCESS, or PSCI_E_INVALID_ADDRESS for a Thumb
	 * entrypoint into an AArch64 EL.
	 */
	u_register_t ep_attr, sctlr;
	unsigned int daif, ee, mode;
	u_register_t ns_scr_el3 = read_scr_el3();
	u_register_t ns_sctlr_el1 = read_sctlr_el1();

	/* If EL2 is enabled for NS (SCR_EL3.HCE), its SCTLR is the one that
	   determines the endianness of the entered world. */
	sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
		read_sctlr_el2() : ns_sctlr_el1;
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if ((sctlr & SCTLR_EE_BIT) != 0U) {
		/* Target world runs big-endian */
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	/* Only arg0 (context_id) is propagated; all other args are zeroed */
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	if ((ns_scr_el3 & SCR_RW_BIT) != 0U) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if ((entrypoint & 0x1UL) != 0UL)
			return PSCI_E_INVALID_ADDRESS;

		/* Enter at EL2 if it is enabled for NS, else EL1 */
		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;

		ep->spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX,
				   DISABLE_ALL_EXCEPTIONS);
	} else {

		/* AArch32 entry: Hyp mode if EL2 is enabled, else Supervisor */
		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
			MODE32_hyp : MODE32_svc;

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

		/* Bit 0 of the entrypoint selects the Thumb instruction set */
		ep->spsr = SPSR_MODE32((uint64_t)mode, entrypoint & 0x1, ee,
				       daif);
	}

	return PSCI_E_SUCCESS;
}
Julius Werner8e0ef0f2019-07-09 14:02:43 -0700885#else /* !__aarch64__ */
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	/*
	 * AArch32 variant: build the non-secure entry point info (PC, SPSR,
	 * arg0) for a CPU being turned on or resumed. Always returns
	 * PSCI_E_SUCCESS.
	 */
	u_register_t ep_attr;
	unsigned int aif, ee, mode;
	u_register_t scr = read_scr();
	u_register_t ns_sctlr, sctlr;

	/* Switch to non secure state */
	/* NS banked SCTLR is only visible with SCR.NS set; the isb makes the
	   SCR write take effect before the read. */
	write_scr(scr | SCR_NS_BIT);
	isb();
	ns_sctlr = read_sctlr();

	/* HSCTLR governs endianness when entering Hyp mode (SCR.HCE set) */
	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

	/* Return to original state */
	write_scr(scr);
	isb();
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		/* Target world runs big-endian */
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	/* Only arg0 (context_id) is propagated; all other args are zeroed */
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	/* Enter in Hyp mode if EL2 is enabled, else Supervisor mode */
	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Choose async. exception bits if HYP mode is not
	 * implemented according to the values of SCR.{AW, FW} bits
	 */
	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

	/* Bit 0 of the entrypoint selects the Thumb instruction set */
	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

	return PSCI_E_SUCCESS;
}
930
931#endif /* __aarch64__ */
Achin Gupta4f6ad662013-10-25 09:08:21 +0100932
933/*******************************************************************************
Soby Mathewf1f97a12015-07-15 12:13:26 +0100934 * This function validates the entrypoint with the platform layer if the
935 * appropriate pm_ops hook is exported by the platform and returns the
936 * 'entry_point_info'.
937 ******************************************************************************/
938int psci_validate_entry_point(entry_point_info_t *ep,
Soby Mathew011ca182015-07-29 17:05:03 +0100939 uintptr_t entrypoint,
940 u_register_t context_id)
Soby Mathewf1f97a12015-07-15 12:13:26 +0100941{
942 int rc;
943
944 /* Validate the entrypoint using platform psci_ops */
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +0100945 if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) {
Soby Mathewf1f97a12015-07-15 12:13:26 +0100946 rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
947 if (rc != PSCI_E_SUCCESS)
948 return PSCI_E_INVALID_ADDRESS;
949 }
950
951 /*
952 * Verify and derive the re-entry information for
953 * the non-secure world from the non-secure state from
954 * where this call originated.
955 */
956 rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
957 return rc;
958}
959
960/*******************************************************************************
Achin Gupta4f6ad662013-10-25 09:08:21 +0100961 * Generic handler which is called when a cpu is physically powered on. It
Soby Mathew981487a2015-07-13 14:10:57 +0100962 * traverses the node information and finds the highest power level powered
963 * off and performs generic, architectural, platform setup and state management
964 * to power on that power level and power levels below it.
965 * e.g. For a cpu that's been powered on, it will call the platform specific
966 * code to enable the gic cpu interface and for a cluster it will enable
967 * coherency at the interconnect level in addition to gic cpu interface.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100968 ******************************************************************************/
void psci_warmboot_entrypoint(void)
{
	unsigned int end_pwrlvl;
	unsigned int cpu_idx = plat_my_core_pos();
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

	/* Init registers that never change for the lifetime of TF-A */
	cm_manage_extensions_el3();

	/*
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
		ERROR("Unexpected affinity info state.\n");
		panic();
	}

	/*
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
	 */
	end_pwrlvl = get_power_on_target_pwrlvl();

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

	/*
	 * This function acquires the lock corresponding to each power level so
	 * that by the time all locks are taken, the system topology is snapshot
	 * and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/* Read the target local power states for levels up to end_pwrlvl */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
#endif

	/*
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these 2 cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handler and perform the generic, architecture
	 * and platform specific handling.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
		psci_cpu_on_finish(cpu_idx, &state_info);
	else
		psci_cpu_suspend_finish(cpu_idx, &state_info);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit_ns();

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

#if ENABLE_PSCI_STAT
	/*
	 * Update PSCI stats.
	 * Caches are off when writing stats data on the power down path.
	 * Since caches are now enabled, it's necessary to do cache
	 * maintenance before reading that same data.
	 */
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * This loop releases the lock corresponding to each power level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
}
Jeenu Viswambharan7f366602014-02-20 17:11:00 +00001056
1057/*******************************************************************************
1058 * This function initializes the set of hooks that PSCI invokes as part of power
1059 * management operation. The power management hooks are expected to be provided
1060 * by the SPD, after it finishes all its initialization
1061 ******************************************************************************/
Dan Handleye2712bc2014-04-10 15:37:22 +01001062void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
Jeenu Viswambharan7f366602014-02-20 17:11:00 +00001063{
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +01001064 assert(pm != NULL);
Jeenu Viswambharan7f366602014-02-20 17:11:00 +00001065 psci_spd_pm = pm;
Soby Mathew6cdddaf2015-01-07 11:10:22 +00001066
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +01001067 if (pm->svc_migrate != NULL)
Soby Mathew6cdddaf2015-01-07 11:10:22 +00001068 psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
1069
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +01001070 if (pm->svc_migrate_info != NULL)
Soby Mathew6cdddaf2015-01-07 11:10:22 +00001071 psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
1072 | define_psci_cap(PSCI_MIG_INFO_TYPE);
Jeenu Viswambharan7f366602014-02-20 17:11:00 +00001073}
Juan Castillo4dc4a472014-08-12 11:17:06 +01001074
1075/*******************************************************************************
Soby Mathew110fe362014-10-23 10:35:34 +01001076 * This function invokes the migrate info hook in the spd_pm_ops. It performs
1077 * the necessary return value validation. If the Secure Payload is UP and
1078 * migrate capable, it returns the mpidr of the CPU on which the Secure payload
1079 * is resident through the mpidr parameter. Else the value of the parameter on
1080 * return is undefined.
1081 ******************************************************************************/
Soby Mathew011ca182015-07-29 17:05:03 +01001082int psci_spd_migrate_info(u_register_t *mpidr)
Soby Mathew110fe362014-10-23 10:35:34 +01001083{
1084 int rc;
1085
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +01001086 if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL))
Soby Mathew110fe362014-10-23 10:35:34 +01001087 return PSCI_E_NOT_SUPPORTED;
1088
1089 rc = psci_spd_pm->svc_migrate_info(mpidr);
1090
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +01001091 assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) ||
1092 (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED));
Soby Mathew110fe362014-10-23 10:35:34 +01001093
1094 return rc;
1095}
1096
1097
1098/*******************************************************************************
Soby Mathew981487a2015-07-13 14:10:57 +01001099 * This function prints the state of all power domains present in the
Juan Castillo4dc4a472014-08-12 11:17:06 +01001100 * system
1101 ******************************************************************************/
Soby Mathew981487a2015-07-13 14:10:57 +01001102void psci_print_power_domain_map(void)
Juan Castillo4dc4a472014-08-12 11:17:06 +01001103{
1104#if LOG_LEVEL >= LOG_LEVEL_INFO
Pankaj Gupta02c35682019-10-15 15:44:45 +05301105 unsigned int idx;
Soby Mathew981487a2015-07-13 14:10:57 +01001106 plat_local_state_t state;
1107 plat_local_state_type_t state_type;
1108
Juan Castillo4dc4a472014-08-12 11:17:06 +01001109 /* This array maps to the PSCI_STATE_X definitions in psci.h */
Soby Mathew24ab34f2016-05-03 17:11:42 +01001110 static const char * const psci_state_type_str[] = {
Juan Castillo4dc4a472014-08-12 11:17:06 +01001111 "ON",
Soby Mathew981487a2015-07-13 14:10:57 +01001112 "RETENTION",
Juan Castillo4dc4a472014-08-12 11:17:06 +01001113 "OFF",
Juan Castillo4dc4a472014-08-12 11:17:06 +01001114 };
1115
Soby Mathew981487a2015-07-13 14:10:57 +01001116 INFO("PSCI Power Domain Map:\n");
Pankaj Gupta02c35682019-10-15 15:44:45 +05301117 for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - psci_plat_core_count);
Soby Mathew981487a2015-07-13 14:10:57 +01001118 idx++) {
1119 state_type = find_local_state_type(
1120 psci_non_cpu_pd_nodes[idx].local_state);
Yann Gautier507e0cd2022-02-14 11:09:23 +01001121 INFO(" Domain Node : Level %u, parent_node %u,"
Soby Mathew981487a2015-07-13 14:10:57 +01001122 " State %s (0x%x)\n",
1123 psci_non_cpu_pd_nodes[idx].level,
1124 psci_non_cpu_pd_nodes[idx].parent_node,
1125 psci_state_type_str[state_type],
1126 psci_non_cpu_pd_nodes[idx].local_state);
1127 }
1128
Pankaj Gupta02c35682019-10-15 15:44:45 +05301129 for (idx = 0; idx < psci_plat_core_count; idx++) {
Soby Mathew981487a2015-07-13 14:10:57 +01001130 state = psci_get_cpu_local_state_by_idx(idx);
1131 state_type = find_local_state_type(state);
Yann Gautier507e0cd2022-02-14 11:09:23 +01001132 INFO(" CPU Node : MPID 0x%llx, parent_node %u,"
Soby Mathew981487a2015-07-13 14:10:57 +01001133 " State %s (0x%x)\n",
Soby Mathewa0fedc42016-06-16 14:52:04 +01001134 (unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
Soby Mathew981487a2015-07-13 14:10:57 +01001135 psci_cpu_pd_nodes[idx].parent_node,
1136 psci_state_type_str[state_type],
1137 psci_get_cpu_local_state_by_idx(idx));
Juan Castillo4dc4a472014-08-12 11:17:06 +01001138 }
1139#endif
1140}
Soby Mathew981487a2015-07-13 14:10:57 +01001141
Jeenu Viswambharanbc1a9292017-02-16 14:55:15 +00001142/******************************************************************************
1143 * Return whether any secondaries were powered up with CPU_ON call. A CPU that
1144 * have ever been powered up would have set its MPDIR value to something other
1145 * than PSCI_INVALID_MPIDR. Note that MPDIR isn't reset back to
1146 * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
1147 * meaningful only when called on the primary CPU during early boot.
1148 *****************************************************************************/
1149int psci_secondaries_brought_up(void)
1150{
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +01001151 unsigned int idx, n_valid = 0U;
Jeenu Viswambharanbc1a9292017-02-16 14:55:15 +00001152
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +01001153 for (idx = 0U; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
Jeenu Viswambharanbc1a9292017-02-16 14:55:15 +00001154 if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
1155 n_valid++;
1156 }
1157
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +01001158 assert(n_valid > 0U);
Jeenu Viswambharanbc1a9292017-02-16 14:55:15 +00001159
Antonio Nino Diaz78a95a62018-07-17 15:10:08 +01001160 return (n_valid > 1U) ? 1 : 0;
Jeenu Viswambharanbc1a9292017-02-16 14:55:15 +00001161}
1162
Jeenu Viswambharan346bfd82017-01-05 11:01:02 +00001163/*******************************************************************************
1164 * Initiate power down sequence, by calling power down operations registered for
1165 * this CPU.
1166 ******************************************************************************/
void psci_pwrdown_cpu(unsigned int power_level)
{
	/*
	 * Let extension handling run before the core's context is lost.
	 * NOTE(review): the exact set of extensions handled depends on
	 * psci_do_manage_extensions(), defined elsewhere in this file.
	 */
	psci_do_manage_extensions();

#if HW_ASSISTED_COHERENCY
	/*
	 * With hardware-assisted coherency, the CPU drivers only initiate the
	 * power down sequence, without performing cache-maintenance operations
	 * in software. Data caches enabled both before and after this call.
	 */
	prepare_cpu_pwr_dwn(power_level);
#else
	/*
	 * Without hardware-assisted coherency, the CPU drivers disable data
	 * caches, then perform cache-maintenance operations in software.
	 *
	 * This also calls prepare_cpu_pwr_dwn() to initiate power down
	 * sequence, but that function will return with data caches disabled.
	 * We must ensure that the stack memory is flushed out to memory before
	 * we start popping from it again.
	 */
	psci_do_pwrdown_cache_maintenance(power_level);
#endif
}
Sandeep Tripathy12030042020-08-17 20:22:13 +05301191
1192/*******************************************************************************
1193 * This function invokes the callback 'stop_func()' with the 'mpidr' of each
1194 * online PE. Caller can pass suitable method to stop a remote core.
1195 *
1196 * 'wait_ms' is the timeout value in milliseconds for the other cores to
1197 * transition to power down state. Passing '0' makes it non-blocking.
1198 *
1199 * The function returns 'PSCI_E_DENIED' if some cores failed to stop within the
1200 * given timeout.
1201 ******************************************************************************/
1202int psci_stop_other_cores(unsigned int wait_ms,
1203 void (*stop_func)(u_register_t mpidr))
1204{
1205 unsigned int idx, this_cpu_idx;
1206
1207 this_cpu_idx = plat_my_core_pos();
1208
1209 /* Invoke stop_func for each core */
1210 for (idx = 0U; idx < psci_plat_core_count; idx++) {
1211 /* skip current CPU */
1212 if (idx == this_cpu_idx) {
1213 continue;
1214 }
1215
1216 /* Check if the CPU is ON */
1217 if (psci_get_aff_info_state_by_idx(idx) == AFF_STATE_ON) {
1218 (*stop_func)(psci_cpu_pd_nodes[idx].mpidr);
1219 }
1220 }
1221
1222 /* Need to wait for other cores to shutdown */
1223 if (wait_ms != 0U) {
Jayanth Dodderi Chidanand70763502022-08-22 23:46:10 +01001224 while ((wait_ms-- != 0U) && (!psci_is_last_on_cpu())) {
Sandeep Tripathy12030042020-08-17 20:22:13 +05301225 mdelay(1U);
1226 }
1227
Jayanth Dodderi Chidanand70763502022-08-22 23:46:10 +01001228 if (!psci_is_last_on_cpu()) {
Sandeep Tripathy12030042020-08-17 20:22:13 +05301229 WARN("Failed to stop all cores!\n");
1230 psci_print_power_domain_map();
1231 return PSCI_E_DENIED;
1232 }
1233 }
1234
1235 return PSCI_E_SUCCESS;
1236}
Lucian Paul-Trifu5e685352022-03-02 21:28:24 +00001237
1238/*******************************************************************************
1239 * This function verifies that all the other cores in the system have been
1240 * turned OFF and the current CPU is the last running CPU in the system.
1241 * Returns true if the current CPU is the last ON CPU or false otherwise.
1242 *
1243 * This API has following differences with psci_is_last_on_cpu
1244 * 1. PSCI states are locked
Lucian Paul-Trifu5e685352022-03-02 21:28:24 +00001245 ******************************************************************************/
1246bool psci_is_last_on_cpu_safe(void)
1247{
1248 unsigned int this_core = plat_my_core_pos();
1249 unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
Lucian Paul-Trifu5e685352022-03-02 21:28:24 +00001250
Jayanth Dodderi Chidanand70763502022-08-22 23:46:10 +01001251 psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
Lucian Paul-Trifu5e685352022-03-02 21:28:24 +00001252
Jayanth Dodderi Chidanand70763502022-08-22 23:46:10 +01001253 psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
Lucian Paul-Trifu5e685352022-03-02 21:28:24 +00001254
Jayanth Dodderi Chidanand70763502022-08-22 23:46:10 +01001255 if (!psci_is_last_on_cpu()) {
Lucian Paul-Trifu5e685352022-03-02 21:28:24 +00001256 psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
Jayanth Dodderi Chidanand70763502022-08-22 23:46:10 +01001257 return false;
Lucian Paul-Trifu5e685352022-03-02 21:28:24 +00001258 }
1259
Jayanth Dodderi Chidanand70763502022-08-22 23:46:10 +01001260 psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1261
Lucian Paul-Trifu5e685352022-03-02 21:28:24 +00001262 return true;
1263}
Wing Li71f69df2022-09-14 13:18:15 -07001264
1265/*******************************************************************************
1266 * This function verifies that all cores in the system have been turned ON.
1267 * Returns true, if all CPUs are ON or false otherwise.
1268 *
1269 * This API has following differences with psci_are_all_cpus_on
1270 * 1. PSCI states are locked
1271 ******************************************************************************/
1272bool psci_are_all_cpus_on_safe(void)
1273{
1274 unsigned int this_core = plat_my_core_pos();
1275 unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
1276
1277 psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
1278
1279 psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1280
1281 if (!psci_are_all_cpus_on()) {
1282 psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1283 return false;
1284 }
1285
1286 psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1287
1288 return true;
1289}
Jayanth Dodderi Chidanand165e59f2023-09-14 11:07:02 +01001290
1291/*******************************************************************************
1292 * This function performs architectural feature specific management.
1293 * It ensures the architectural features are disabled during cpu
1294 * power off/suspend operations.
1295 ******************************************************************************/
1296void psci_do_manage_extensions(void)
1297{
Jayanth Dodderi Chidanand18d93792023-07-18 14:48:09 +01001298 /*
1299 * On power down we need to disable statistical profiling extensions
1300 * before exiting coherency.
1301 */
1302 if (is_feat_spe_supported()) {
1303 spe_disable();
1304 }
Jayanth Dodderi Chidanand165e59f2023-09-14 11:07:02 +01001305
1306}