/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <platform.h>
#include <pmf.h>
#include <runtime_instr.h>
#include <string.h>
#include "psci_private.h"

/******************************************************************************
 * Construct the psci_power_state to request power OFF at all power levels.
 ******************************************************************************/
static void psci_set_power_off_state(psci_power_state_t *state_info)
{
        unsigned int lvl;

        for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
                state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
}

/******************************************************************************
 * Top level handler which is called when a cpu wants to power itself down.
 * It's assumed that along with turning the cpu power domain off, power
 * domains at higher levels will be turned off as far as possible. It finds
 * the highest level where a domain has to be powered off by traversing the
 * node information and then performs generic, architectural, platform setup
 * and state management required to turn OFF that power domain and domains
 * below it. e.g. For a cpu that's to be powered OFF, it could mean programming
 * the power controller whereas for a cluster that's to be powered off, it will
 * call the platform specific code which will disable coherency at the
 * interconnect level if the cpu is the last in the cluster and also
 * program the power controller.
 ******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
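        /*
         * 'idx' is this CPU's linear core position in the topology and 'rc'
         * holds the status that is eventually returned to the CPU_OFF caller.
         */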
        int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
        psci_power_state_t state_info;

        /*
         * This function must only be called on platforms where the
         * CPU_OFF platform hooks have been implemented.
         */
        assert(psci_plat_pm_ops->pwr_domain_off);

        /*
         * This function acquires the lock corresponding to each power
         * level so that by the time all locks are taken, the system topology
         * is snapshotted and state management can be done safely.
         */
        psci_acquire_pwr_domain_locks(end_pwrlvl,
                                      idx);

        /*
         * Call the cpu off handler registered by the Secure Payload Dispatcher
         * to let it do any bookkeeping. Assume that the SPD always reports an
         * E_DENIED error if the SP refuses to power down.
         */
        if (psci_spd_pm && psci_spd_pm->svc_off) {
                rc = psci_spd_pm->svc_off(0);
                if (rc)
                        goto exit;
        }

        /* Construct the psci_power_state for CPU_OFF */
        psci_set_power_off_state(&state_info);

        /*
         * This function is passed the requested state info and
         * it returns the negotiated state info for each power level up to
         * the end level specified.
         */
        psci_do_state_coordination(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
        /* Update the last cpu for each level till end_pwrlvl */
        psci_stats_update_pwr_down(end_pwrlvl, &state_info);
#endif

#if ENABLE_RUNTIME_INSTRUMENTATION

        /*
         * Flush cache line so that even if CPU power down happens
         * the timestamp update is reflected in memory.
         */
        PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
                RT_INSTR_ENTER_CFLUSH,
                PMF_CACHE_MAINT);
#endif

        /*
         * Arch. management. Initiate power down sequence.
         */
        psci_do_pwrdown_sequence(psci_find_max_off_lvl(&state_info));

#if ENABLE_RUNTIME_INSTRUMENTATION
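        /*
         * Capture the timestamp for exit from the cache flush. The power down
         * sequence above has disabled the data cache for this CPU, so no
         * cache maintenance is requested for this timestamp.
         */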
        PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
                RT_INSTR_EXIT_CFLUSH,
                PMF_NO_CACHE_MAINT);
#endif

        /*
         * Plat. management: Perform platform specific actions to turn this
         * cpu off e.g. exit cpu coherency, program the power controller etc.
         */
        psci_plat_pm_ops->pwr_domain_off(&state_info);

#if ENABLE_PSCI_STAT
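        /*
         * Let the platform start accounting for the time this CPU is about
         * to spend in the off state, for PSCI statistics.
         */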
        plat_psci_stat_accounting_start(&state_info);
#endif

exit:
        /*
         * Release the locks corresponding to each power level in the
         * reverse order to which they were acquired.
         */
        psci_release_pwr_domain_locks(end_pwrlvl,
                                      idx);

        /*
         * Check if all actions needed to safely power down this cpu have
         * successfully completed.
         */
        if (rc == PSCI_E_SUCCESS) {
                /*
                 * Set the affinity info state to OFF. When caches are disabled,
                 * this writes directly to main memory, so cache maintenance is
                 * required to ensure that later cached reads of aff_info_state
                 * return AFF_STATE_OFF. A dsbish() ensures ordering of the
                 * update to the affinity info state prior to cache line
                 * invalidation.
                 */
                psci_flush_cpu_data(psci_svc_cpu_data.aff_info_state);
                psci_set_aff_info_state(AFF_STATE_OFF);
                psci_dsbish();
                psci_inv_cpu_data(psci_svc_cpu_data.aff_info_state);

#if ENABLE_RUNTIME_INSTRUMENTATION

                /*
                 * Update the timestamp with cache off. We assume this
                 * timestamp can only be read from the current CPU and the
                 * timestamp cache line will be flushed before return to
                 * normal world on wakeup.
                 */
                PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
                        RT_INSTR_ENTER_HW_LOW_PWR,
                        PMF_NO_CACHE_MAINT);
#endif

                if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
                        /* This function must not return */
                        psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
                } else {
                        /*
                         * Enter a wfi loop which will allow the power
                         * controller to physically power down this cpu.
                         */
                        psci_power_down_wfi();
                }
        }

        return rc;
}