/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <platform.h>
#include <pmf.h>
#include <runtime_instr.h>
#include <string.h>
#include "psci_private.h"

/******************************************************************************
 * Construct the psci_power_state to request power OFF at all power levels.
 ******************************************************************************/
static void psci_set_power_off_state(psci_power_state_t *state_info)
{
	int lvl;

	for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
}
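
/*
 * Illustration (assumed three-level topology, not taken from this file): on a
 * platform where PLAT_MAX_PWR_LVL is 2, the helper above simply yields
 *
 *	state_info->pwr_domain_state[0..2] = PLAT_MAX_OFF_STATE
 *
 * i.e. the deepest OFF state is requested at every level; CPU_OFF never asks
 * for a shallower retention state.
 */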

/******************************************************************************
 * Top level handler which is called when a cpu wants to power itself down.
 * It's assumed that along with turning the cpu power domain off, power
 * domains at higher levels will be turned off as far as possible. It finds
 * the highest level where a domain has to be powered off by traversing the
 * node information and then performs the generic, architectural and platform
 * setup and state management required to turn OFF that power domain and the
 * domains below it. e.g. For a cpu that's to be powered OFF, it could mean
 * programming the power controller, whereas for a cluster that's to be
 * powered off, it will call the platform specific code which will disable
 * coherency at the interconnect level if the cpu is the last in the cluster
 * and also program the power controller.
 ******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
	int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
	psci_power_state_t state_info;

	/*
	 * This function must only be called on platforms where the
	 * CPU_OFF platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_off);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, the system topology
	 * has been snapshotted and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * Call the cpu off handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. Assume that the SPD always reports an
	 * E_DENIED error if the SP refuses to power down.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_off) {
		rc = psci_spd_pm->svc_off(0);
		if (rc)
			goto exit;
	}
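
	/*
	 * If the SPD denied the request above (rc != PSCI_E_SUCCESS), control
	 * jumps to 'exit' below: the power domain locks are released and the
	 * error is returned to the caller with this cpu still running.
	 */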

	/* Construct the psci_power_state for CPU_OFF */
	psci_set_power_off_state(&state_info);

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, &state_info);
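
	/*
	 * Note: after coordination, state_info holds the deepest state that
	 * every core in each affected power domain has agreed to. In practice
	 * this means a higher-level domain (e.g. the cluster) is only marked
	 * OFF if this is its last running cpu.
	 */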

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level up to end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, &state_info);
#endif

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush the cache line so that the timestamp update is reflected in
	 * memory even if the CPU subsequently powers down.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Initiate the power down sequence.
	 */
	psci_do_pwrdown_sequence(psci_find_max_off_lvl(&state_info));
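
	/*
	 * Note (behaviour summarised from the generic PSCI layer, not from
	 * this file): the sequence above runs the CPU-specific power-down
	 * handler and, on platforms without hardware-managed coherency, the
	 * data cache maintenance needed before this cpu exits coherency, down
	 * to the deepest level found to be powering off.
	 */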

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif

	/*
	 * Plat. management: Perform platform specific actions to turn this
	 * cpu off e.g. exit cpu coherency, program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_off(&state_info);
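
	/*
	 * The coordinated state_info is passed so the platform hook can tell
	 * whether higher-level domains (e.g. the cluster) are being powered
	 * off along with this cpu and act accordingly.
	 */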

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(&state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * Check if all actions needed to safely power down this cpu have
	 * successfully completed.
	 */
	if (rc == PSCI_E_SUCCESS) {
		/*
		 * Set the affinity info state to OFF. When caches are disabled,
		 * this writes directly to main memory, so cache maintenance is
		 * required to ensure that later cached reads of aff_info_state
		 * return AFF_STATE_OFF. A dsbish() ensures ordering of the
		 * update to the affinity info state prior to cache line
		 * invalidation.
		 */
		psci_flush_cpu_data(psci_svc_cpu_data.aff_info_state);
		psci_set_aff_info_state(AFF_STATE_OFF);
		psci_dsbish();
		psci_inv_cpu_data(psci_svc_cpu_data.aff_info_state);

#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off. We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
			RT_INSTR_ENTER_HW_LOW_PWR,
			PMF_NO_CACHE_MAINT);
#endif

		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
			/* This function must not return */
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
		} else {
			/*
			 * Enter a wfi loop which will allow the power
			 * controller to physically power down this cpu.
			 */
			psci_power_down_wfi();
		}
	}

	return rc;
}
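
/*
 * For context, a minimal sketch of the expected caller, modelled on
 * psci_cpu_off() in psci_main.c (the exact upstream code may differ):
 *
 *	int psci_cpu_off(void)
 *	{
 *		int rc;
 *
 *		rc = psci_do_cpu_off(PLAT_MAX_PWR_LVL);
 *
 *		assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_DENIED);
 *
 *		return rc;
 *	}
 */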