blob: a8904e98459c353077e4768bd67748ae9dd53775 [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dan Handleye83b0ca2014-01-14 18:17:09 +00002 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
Dan Handley2bd4ef22014-04-09 13:14:54 +010031#include <arch.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010032#include <arch_helpers.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010033#include <assert.h>
34#include <string.h>
Dan Handley714a0d22014-04-09 13:13:04 +010035#include "psci_private.h"
Achin Gupta4f6ad662013-10-25 09:08:21 +010036
/* Signature of a per-affinity-level power-down handler */
typedef int (*afflvl_off_handler_t)(aff_map_node_t *);

/*******************************************************************************
 * The next three functions implement a handler for each supported affinity
 * level which is called when that affinity level is turned off.
 ******************************************************************************/
/*
 * Power down the calling cpu (affinity level 0): mark it off, give the
 * Secure Payload Dispatcher a chance to veto, disable and flush the data
 * cache, then hand over to the platform power controller hook.
 * Returns PSCI_E_SUCCESS or an error code from the SPD/platform.
 */
static int psci_afflvl0_off(aff_map_node_t *cpu_node)
{
	unsigned int plat_state;
	int rc;
	unsigned long sctlr;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* State management: mark this cpu as turned off */
	psci_set_state(cpu_node, PSCI_STATE_OFF);

	/*
	 * Generic management: Get the index for clearing any lingering re-entry
	 * information and allow the secure world to switch itself off
	 */

	/*
	 * Call the cpu off handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. Assume that the SPD always reports an
	 * E_DENIED error if SP refuses to power down
	 */
	if (psci_spd_pm && psci_spd_pm->svc_off) {
		rc = psci_spd_pm->svc_off(0);
		if (rc)
			/* SPD vetoed the power down; abort without touching caches */
			return rc;
	}

	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches.
	 *
	 * TODO: This power down sequence varies across cpus so it needs to be
	 * abstracted out on the basis of the MIDR like in cpu_reset_handler().
	 * Do the bare minimal for the time being. Fix this before porting to
	 * Cortex models.
	 */
	sctlr = read_sctlr_el3();
	sctlr &= ~SCTLR_C_BIT;
	write_sctlr_el3(sctlr);
	isb();	/* ensure the data cache disable takes effect before the flush below */

	/*
	 * CAUTION: This flush to the level of unification makes an assumption
	 * about the cache hierarchy at affinity level 0 (cpu) in the platform.
	 * Ideally the platform should tell psci which levels to flush to exit
	 * coherency.
	 */
	dcsw_op_louis(DCCISW);

	/*
	 * Plat. management: Perform platform specific actions to turn this
	 * cpu off e.g. exit cpu coherency, program the power controller etc.
	 */
	rc = PSCI_E_SUCCESS;
	if (psci_plat_pm_ops->affinst_off) {

		/* Get the current physical state of this cpu */
		plat_state = psci_get_phys_state(cpu_node);
		rc = psci_plat_pm_ops->affinst_off(read_mpidr_el1(),
						   cpu_node->level,
						   plat_state);
	}

	return rc;
}
108
Andrew Thoelke2bc07852014-06-09 12:44:21 +0100109static int psci_afflvl1_off(aff_map_node_t *cluster_node)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100110{
111 int rc = PSCI_E_SUCCESS;
112 unsigned int plat_state;
113
114 /* Sanity check the cluster level */
115 assert(cluster_node->level == MPIDR_AFFLVL1);
116
Achin Gupta75f73672013-12-05 16:33:10 +0000117 /* State management: Decrement the cluster reference count */
118 psci_set_state(cluster_node, PSCI_STATE_OFF);
119
Achin Gupta4f6ad662013-10-25 09:08:21 +0100120 /*
121 * Keep the physical state of this cluster handy to decide
122 * what action needs to be taken
123 */
Achin Gupta75f73672013-12-05 16:33:10 +0000124 plat_state = psci_get_phys_state(cluster_node);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100125
126 /*
127 * Arch. Management. Flush all levels of caches to PoC if
128 * the cluster is to be shutdown
129 */
130 if (plat_state == PSCI_STATE_OFF)
131 dcsw_op_all(DCCISW);
132
133 /*
Achin Gupta3140a9e2013-12-02 16:23:12 +0000134 * Plat. Management. Allow the platform to do its cluster
Achin Gupta4f6ad662013-10-25 09:08:21 +0100135 * specific bookeeping e.g. turn off interconnect coherency,
136 * program the power controller etc.
137 */
138 if (psci_plat_pm_ops->affinst_off)
Andrew Thoelke2bc07852014-06-09 12:44:21 +0100139 rc = psci_plat_pm_ops->affinst_off(read_mpidr_el1(),
Achin Gupta4f6ad662013-10-25 09:08:21 +0100140 cluster_node->level,
141 plat_state);
142
143 return rc;
144}
145
Andrew Thoelke2bc07852014-06-09 12:44:21 +0100146static int psci_afflvl2_off(aff_map_node_t *system_node)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100147{
148 int rc = PSCI_E_SUCCESS;
149 unsigned int plat_state;
150
151 /* Cannot go beyond this level */
152 assert(system_node->level == MPIDR_AFFLVL2);
153
Achin Gupta75f73672013-12-05 16:33:10 +0000154 /* State management: Decrement the system reference count */
155 psci_set_state(system_node, PSCI_STATE_OFF);
156
Achin Gupta4f6ad662013-10-25 09:08:21 +0100157 /*
158 * Keep the physical state of the system handy to decide what
159 * action needs to be taken
160 */
Achin Gupta75f73672013-12-05 16:33:10 +0000161 plat_state = psci_get_phys_state(system_node);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100162
163 /* No arch. and generic bookeeping to do here currently */
164
165 /*
Achin Gupta3140a9e2013-12-02 16:23:12 +0000166 * Plat. Management : Allow the platform to do its bookeeping
Achin Gupta4f6ad662013-10-25 09:08:21 +0100167 * at this affinity level
168 */
169 if (psci_plat_pm_ops->affinst_off)
Andrew Thoelke2bc07852014-06-09 12:44:21 +0100170 rc = psci_plat_pm_ops->affinst_off(read_mpidr_el1(),
Achin Gupta4f6ad662013-10-25 09:08:21 +0100171 system_node->level,
172 plat_state);
173 return rc;
174}
175
/*
 * Table of power-down handlers, indexed by affinity level (0 = cpu,
 * 1 = cluster, 2 = system). The order is semantic — do not reorder.
 */
static const afflvl_off_handler_t psci_afflvl_off_handlers[] = {
	psci_afflvl0_off,
	psci_afflvl1_off,
	psci_afflvl2_off,
};
181
182/*******************************************************************************
Achin Gupta0959db52013-12-02 17:33:04 +0000183 * This function takes an array of pointers to affinity instance nodes in the
184 * topology tree and calls the off handler for the corresponding affinity
185 * levels
186 ******************************************************************************/
Dan Handleye2712bc2014-04-10 15:37:22 +0100187static int psci_call_off_handlers(mpidr_aff_map_nodes_t mpidr_nodes,
Achin Gupta0959db52013-12-02 17:33:04 +0000188 int start_afflvl,
Andrew Thoelke2bc07852014-06-09 12:44:21 +0100189 int end_afflvl)
Achin Gupta0959db52013-12-02 17:33:04 +0000190{
191 int rc = PSCI_E_INVALID_PARAMS, level;
Dan Handleye2712bc2014-04-10 15:37:22 +0100192 aff_map_node_t *node;
Achin Gupta0959db52013-12-02 17:33:04 +0000193
194 for (level = start_afflvl; level <= end_afflvl; level++) {
195 node = mpidr_nodes[level];
196 if (node == NULL)
197 continue;
198
199 /*
200 * TODO: In case of an error should there be a way
201 * of restoring what we might have torn down at
202 * lower affinity levels.
203 */
Andrew Thoelke2bc07852014-06-09 12:44:21 +0100204 rc = psci_afflvl_off_handlers[level](node);
Achin Gupta0959db52013-12-02 17:33:04 +0000205 if (rc != PSCI_E_SUCCESS)
206 break;
207 }
208
209 return rc;
210}
211
/*******************************************************************************
 * Top level handler which is called when a cpu wants to power itself down.
 * It's assumed that along with turning the cpu off, higher affinity levels will
 * be turned off as far as possible. It traverses through all the affinity
 * levels performing generic, architectural, platform setup and state management
 * e.g. for a cluster that's to be powered off, it will call the platform
 * specific code which will disable coherency at the interconnect level if the
 * cpu is the last in the cluster. For a cpu it could mean programming the power
 * controller etc.
 *
 * The state of all the relevant affinity levels is changed prior to calling the
 * affinity level specific handlers as their actions would depend upon the state
 * the affinity level is about to enter.
 *
 * The affinity level specific handlers are called in ascending order i.e. from
 * the lowest to the highest affinity level implemented by the platform because
 * to turn off affinity level X it is necessary to turn off affinity level X - 1
 * first.
 *
 * CAUTION: This function is called with coherent stacks so that coherency can
 * be turned off and caches can be flushed safely.
 ******************************************************************************/
int psci_afflvl_off(int start_afflvl,
		    int end_afflvl)
{
	int rc = PSCI_E_SUCCESS;
	mpidr_aff_map_nodes_t mpidr_nodes;


	/*
	 * Collect the pointers to the nodes in the topology tree for
	 * each affinity instance in the mpidr. If this function does
	 * not return successfully then either the mpidr or the affinity
	 * levels are incorrect. In either case, we cannot return back
	 * to the caller as it would not know what to do.
	 */
	rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
				    start_afflvl,
				    end_afflvl,
				    mpidr_nodes);
	/*
	 * NOTE(review): this assert is compiled out under NDEBUG, in which
	 * case a lookup failure would be silently ignored — confirm release
	 * builds cannot reach here with an invalid mpidr/afflvl range.
	 */
	assert (rc == PSCI_E_SUCCESS);

	/*
	 * This function acquires the lock corresponding to each affinity
	 * level so that by the time all locks are taken, the system topology
	 * is snapshot and state management can be done safely.
	 */
	psci_acquire_afflvl_locks(start_afflvl,
				  end_afflvl,
				  mpidr_nodes);

	/* Perform generic, architecture and platform specific handling */
	rc = psci_call_off_handlers(mpidr_nodes,
				    start_afflvl,
				    end_afflvl);

	/*
	 * Release the locks corresponding to each affinity level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_afflvl_locks(start_afflvl,
				  end_afflvl,
				  mpidr_nodes);

	return rc;
}