/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stddef.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/pmf/pmf.h>
#include <lib/runtime_instr.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*******************************************************************************
 * This function does generic and platform specific operations after a wake-up
 * from standby/retention states at multiple power levels.
 ******************************************************************************/
static void psci_suspend_to_standby_finisher(int cpu_idx,
					     unsigned int end_pwrlvl)
{
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
	psci_power_state_t state_info;

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

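	/* Lock this CPU's parent power domains up to 'end_pwrlvl' */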
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/*
	 * Find out which retention states this CPU has exited, up to
	 * 'end_pwrlvl'. The exit retention state could be deeper than the
	 * entry state as a result of state coordination amongst other CPUs
	 * post wfi.
	 */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * Plat. management: Allow the platform to do operations
	 * on waking up from retention.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

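	/* Drop the locks in the reverse order to which they were acquired */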
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
}

/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
					  const entry_point_info_t *ep,
					  const psci_power_state_t *state_info)
{
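	/* Deepest power level that will be powered down by this suspend request */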
	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

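	/* Let pub-sub subscribers know that a power-down suspend is starting */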
	PUBLISH_EVENT(psci_suspend_pwrdown_start);

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it might be accessed on power up with
	 * Data cache disabled.
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters
	 * an error, it is expected to assert from within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend != NULL))
		psci_spd_pm->svc_suspend(max_off_lvl);

#if !HW_ASSISTED_COHERENCY
	/*
	 * Plat. management: Allow the platform to perform any early
	 * actions required to power down the CPU. This might be useful for
	 * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
	 * actions with data caches enabled.
	 */
	if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early != NULL)
		psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
#endif

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush cache line so that even if CPU power down happens
	 * the timestamp update is reflected in memory.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Initiate power down sequence.
	 * TODO : Introduce a mechanism to query the cache level to flush
	 * and the cpu-ops power down to perform from the platform.
	 */
	psci_do_pwrdown_sequence(max_off_lvl);

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif
}

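/*
 * Illustrative sketch only (not part of this file): the pwr_domain_* hooks
 * invoked above and below are provided by the platform port through its
 * plat_psci_ops_t. A platform might wire them up roughly as follows; the
 * my_* handler names are hypothetical.
 *
 *	static const plat_psci_ops_t my_plat_psci_ops = {
 *		.pwr_domain_suspend		= my_pwr_domain_suspend,
 *		.pwr_domain_suspend_finish	= my_pwr_domain_suspend_finish,
 *		.pwr_domain_pwr_down_wfi	= my_pwr_domain_pwr_down_wfi,
 *	};
 *
 * The full set of members and how the structure is registered (e.g. via
 * plat_setup_psci_ops()) depend on the individual platform port.
 */
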
/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels until the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each of
 * the power domain levels up to the target power domain level. It then
 * performs generic, architectural, platform setup and state management
 * required to suspend that power domain level and the power domain levels
 * below it. e.g. For a cpu that's to be suspended, it could mean programming
 * the power controller whereas for a cluster that's to be suspended, it will
 * call the platform specific code which will disable coherency at the
 * interconnect level if the cpu is the last in the cluster and also program
 * the power controller.
 *
 * All the required parameter checks are performed at the beginning. After
 * the state transition has been done, no further error is expected and it is
 * not possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_cpu_suspend_start(const entry_point_info_t *ep,
			    unsigned int end_pwrlvl,
			    psci_power_state_t *state_info,
			    unsigned int is_power_down_state)
{
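	/*
	 * skip_wfi is set if a wake-up interrupt is found pending while the
	 * locks are held; in that case the suspend request is abandoned and
	 * wfi is not executed.
	 */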
	int skip_wfi = 0;
	int idx = (int) plat_my_core_pos();
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
	       (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL));

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, the system topology
	 * has effectively been snapshotted and state management can be done
	 * safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1() != 0U) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif

	if (is_power_down_state != 0U)
		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_suspend(state_info);

#if ENABLE_PSCI_STAT
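	/* Start accounting the residency of the low-power state being entered */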
	plat_psci_stat_accounting_start(state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);

	if (skip_wfi == 1)
		return;

	if (is_power_down_state != 0U) {
#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off. We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
			RT_INSTR_ENTER_HW_LOW_PWR,
			PMF_NO_CACHE_MAINT);
#endif

		/* The function calls below must not return */
		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL)
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
		else
			psci_power_down_wfi();
	}

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_HW_LOW_PWR,
		PMF_NO_CACHE_MAINT);
#endif

	/*
	 * We will reach here if only retention/standby states have been
	 * requested at multiple power levels. This means that the cpu
	 * context will be preserved.
	 */
	wfi();

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_HW_LOW_PWR,
		PMF_NO_CACHE_MAINT);
#endif

	/*
	 * After we wake up from context retaining suspend, call the
	 * context retaining suspend finisher.
	 */
	psci_suspend_to_standby_finisher(idx, end_pwrlvl);
}

/*******************************************************************************
 * The following function finishes an earlier suspend request. It is called
 * by the common finisher routine in psci_common.c. The `state_info` is the
 * psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_suspend_finish(int cpu_idx, const psci_power_state_t *state_info)
{
	unsigned int counter_freq;
	unsigned int max_off_lvl;

	/* Ensure we have been woken up from a suspended state */
	assert((psci_get_aff_info_state() == AFF_STATE_ON) &&
	       (is_local_state_off(
		state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]) != 0));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Arch. management: Enable the data cache and do stack memory maintenance */
	psci_do_pwrup_cache_maintenance();
#endif

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert from within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend_finish != NULL)) {
		max_off_lvl = psci_find_max_off_lvl(state_info);
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* Invalidate the suspend level for the cpu */
	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);

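	/* Let pub-sub subscribers know that the power-down suspend has finished */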
	PUBLISH_EVENT(psci_suspend_pwrdown_finish);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}