blob: 22685ba806fabb7027f18fcfb5a5cbb1a9533f1e [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dan Handleyab2d31e2013-12-02 19:25:12 +00002 * Copyright (c) 2013, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <stdio.h>
32#include <string.h>
33#include <assert.h>
34#include <arch_helpers.h>
35#include <console.h>
36#include <platform.h>
37#include <psci.h>
38#include <psci_private.h>
39
/* Signature of a handler invoked to power down one affinity level instance */
typedef int (*afflvl_off_handler)(unsigned long, aff_map_node *);
41
42/*******************************************************************************
43 * The next three functions implement a handler for each supported affinity
44 * level which is called when that affinity level is turned off.
45 ******************************************************************************/
/*
 * Affinity level 0 (cpu) off handler.
 *
 * Clears this cpu's saved non-secure re-entry information, performs the
 * architectural power down sequence (disable the data cache, then flush
 * it out of coherency) and finally invokes the platform's affinst_off
 * hook, if one is registered.
 *
 * Returns PSCI_E_SUCCESS, or whatever error the platform hook reports.
 *
 * NOTE(review): the order below is critical — the D-cache must be
 * disabled via SCTLR before the flush, otherwise lines could be
 * re-allocated dirty after the flush completes.
 */
static int psci_afflvl0_off(unsigned long mpidr, aff_map_node *cpu_node)
{
	unsigned int index, plat_state;
	int rc = PSCI_E_SUCCESS;
	unsigned long sctlr = read_sctlr();

	/* This handler must only ever see a level 0 (cpu) node */
	assert(cpu_node->level == MPIDR_AFFLVL0);

	/*
	 * Generic management: Get the index for clearing any
	 * lingering re-entry information
	 */
	index = cpu_node->data;
	memset(&psci_ns_entry_info[index], 0, sizeof(psci_ns_entry_info[index]));

	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches.
	 *
	 * TODO: This power down sequence varies across cpus so it needs to be
	 * abstracted out on the basis of the MIDR like in cpu_reset_handler().
	 * Do the bare minimal for the time being. Fix this before porting to
	 * Cortex models.
	 */
	/* Turn the data cache off before flushing (see NOTE above) */
	sctlr &= ~SCTLR_C_BIT;
	write_sctlr(sctlr);

	/*
	 * CAUTION: This flush to the level of unification makes an assumption
	 * about the cache hierarchy at affinity level 0 (cpu) in the platform.
	 * Ideally the platform should tell psci which levels to flush to exit
	 * coherency.
	 */
	/* Clean & invalidate by set/way to the Level of Unification Inner Shareable */
	dcsw_op_louis(DCCISW);

	/*
	 * Plat. management: Perform platform specific actions to turn this
	 * cpu off e.g. exit cpu coherency, program the power controller etc.
	 */
	if (psci_plat_pm_ops->affinst_off) {

		/* Get the current physical state of this cpu */
		plat_state = psci_get_aff_phys_state(cpu_node);
		rc = psci_plat_pm_ops->affinst_off(mpidr,
						   cpu_node->level,
						   plat_state);
	}

	return rc;
}
96
97static int psci_afflvl1_off(unsigned long mpidr, aff_map_node *cluster_node)
98{
99 int rc = PSCI_E_SUCCESS;
100 unsigned int plat_state;
101
102 /* Sanity check the cluster level */
103 assert(cluster_node->level == MPIDR_AFFLVL1);
104
105 /*
106 * Keep the physical state of this cluster handy to decide
107 * what action needs to be taken
108 */
109 plat_state = psci_get_aff_phys_state(cluster_node);
110
111 /*
112 * Arch. Management. Flush all levels of caches to PoC if
113 * the cluster is to be shutdown
114 */
115 if (plat_state == PSCI_STATE_OFF)
116 dcsw_op_all(DCCISW);
117
118 /*
Achin Gupta3140a9e2013-12-02 16:23:12 +0000119 * Plat. Management. Allow the platform to do its cluster
Achin Gupta4f6ad662013-10-25 09:08:21 +0100120 * specific bookeeping e.g. turn off interconnect coherency,
121 * program the power controller etc.
122 */
123 if (psci_plat_pm_ops->affinst_off)
124 rc = psci_plat_pm_ops->affinst_off(mpidr,
125 cluster_node->level,
126 plat_state);
127
128 return rc;
129}
130
131static int psci_afflvl2_off(unsigned long mpidr, aff_map_node *system_node)
132{
133 int rc = PSCI_E_SUCCESS;
134 unsigned int plat_state;
135
136 /* Cannot go beyond this level */
137 assert(system_node->level == MPIDR_AFFLVL2);
138
139 /*
140 * Keep the physical state of the system handy to decide what
141 * action needs to be taken
142 */
143 plat_state = psci_get_aff_phys_state(system_node);
144
145 /* No arch. and generic bookeeping to do here currently */
146
147 /*
Achin Gupta3140a9e2013-12-02 16:23:12 +0000148 * Plat. Management : Allow the platform to do its bookeeping
Achin Gupta4f6ad662013-10-25 09:08:21 +0100149 * at this affinity level
150 */
151 if (psci_plat_pm_ops->affinst_off)
152 rc = psci_plat_pm_ops->affinst_off(mpidr,
153 system_node->level,
154 plat_state);
155 return rc;
156}
157
/* Off handlers indexed by affinity level: 0 = cpu, 1 = cluster, 2 = system */
static const afflvl_off_handler psci_afflvl_off_handlers[] = {
	psci_afflvl0_off,
	psci_afflvl1_off,
	psci_afflvl2_off,
};
163
164/*******************************************************************************
Achin Gupta0959db52013-12-02 17:33:04 +0000165 * This function takes an array of pointers to affinity instance nodes in the
166 * topology tree and calls the off handler for the corresponding affinity
167 * levels
168 ******************************************************************************/
169static int psci_call_off_handlers(mpidr_aff_map_nodes mpidr_nodes,
170 int start_afflvl,
171 int end_afflvl,
172 unsigned long mpidr)
173{
174 int rc = PSCI_E_INVALID_PARAMS, level;
175 aff_map_node *node;
176
177 for (level = start_afflvl; level <= end_afflvl; level++) {
178 node = mpidr_nodes[level];
179 if (node == NULL)
180 continue;
181
182 /*
183 * TODO: In case of an error should there be a way
184 * of restoring what we might have torn down at
185 * lower affinity levels.
186 */
187 rc = psci_afflvl_off_handlers[level](mpidr, node);
188 if (rc != PSCI_E_SUCCESS)
189 break;
190 }
191
192 return rc;
193}
194
195/*******************************************************************************
196 * Top level handler which is called when a cpu wants to power itself down.
197 * It's assumed that along with turning the cpu off, higher affinity levels will
198 * be turned off as far as possible. It traverses through all the affinity
199 * levels performing generic, architectural, platform setup and state management
200 * e.g. for a cluster that's to be powered off, it will call the platform
201 * specific code which will disable coherency at the interconnect level if the
202 * cpu is the last in the cluster. For a cpu it could mean programming the power
203 * the power controller etc.
204 *
205 * The state of all the relevant affinity levels is changed prior to calling the
206 * affinity level specific handlers as their actions would depend upon the state
207 * the affinity level is about to enter.
208 *
209 * The affinity level specific handlers are called in ascending order i.e. from
210 * the lowest to the highest affinity level implemented by the platform because
211 * to turn off affinity level X it is neccesary to turn off affinity level X - 1
212 * first.
213 *
214 * CAUTION: This function is called with coherent stacks so that coherency can
215 * be turned off and caches can be flushed safely.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100216 ******************************************************************************/
217int psci_afflvl_off(unsigned long mpidr,
Achin Gupta0959db52013-12-02 17:33:04 +0000218 int start_afflvl,
219 int end_afflvl)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100220{
Achin Gupta0959db52013-12-02 17:33:04 +0000221 int rc = PSCI_E_SUCCESS;
222 unsigned int prev_state;
223 mpidr_aff_map_nodes mpidr_nodes;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100224
225 mpidr &= MPIDR_AFFINITY_MASK;;
226
227 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000228 * Collect the pointers to the nodes in the topology tree for
229 * each affinity instance in the mpidr. If this function does
230 * not return successfully then either the mpidr or the affinity
231 * levels are incorrect. In either case, we cannot return back
232 * to the caller as it would not know what to do.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100233 */
Achin Gupta0959db52013-12-02 17:33:04 +0000234 rc = psci_get_aff_map_nodes(mpidr,
235 start_afflvl,
236 end_afflvl,
237 mpidr_nodes);
238 assert (rc == PSCI_E_SUCCESS);
239
Achin Gupta4f6ad662013-10-25 09:08:21 +0100240 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000241 * This function acquires the lock corresponding to each affinity
242 * level so that by the time all locks are taken, the system topology
243 * is snapshot and state management can be done safely.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100244 */
Achin Gupta0959db52013-12-02 17:33:04 +0000245 psci_acquire_afflvl_locks(mpidr,
246 start_afflvl,
247 end_afflvl,
248 mpidr_nodes);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100249
250 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000251 * Keep the old cpu state handy. It will be used to restore the
252 * system to its original state in case something goes wrong
Achin Gupta4f6ad662013-10-25 09:08:21 +0100253 */
Achin Gupta0959db52013-12-02 17:33:04 +0000254 prev_state = psci_get_state(mpidr_nodes[MPIDR_AFFLVL0]->state);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100255
256 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000257 * State management: Update the state of each affinity instance
258 * between the start and end affinity levels
Achin Gupta4f6ad662013-10-25 09:08:21 +0100259 */
Achin Gupta0959db52013-12-02 17:33:04 +0000260 psci_change_state(mpidr_nodes,
261 start_afflvl,
262 end_afflvl,
263 PSCI_STATE_OFF);
264
265 /* Perform generic, architecture and platform specific handling */
266 rc = psci_call_off_handlers(mpidr_nodes,
267 start_afflvl,
268 end_afflvl,
269 mpidr);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100270
271 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000272 * If an error is returned by a handler then restore the cpu state
273 * to its original value. If the cpu state is restored then that
274 * should result in the state of the higher affinity levels to
275 * get restored as well.
276 * TODO: We are not undoing any architectural or platform specific
277 * operations that might have completed before encountering the
278 * error. The system might not be in a stable state.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100279 */
Achin Gupta0959db52013-12-02 17:33:04 +0000280 if (rc != PSCI_E_SUCCESS)
281 psci_change_state(mpidr_nodes,
282 start_afflvl,
283 end_afflvl,
284 prev_state);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100285
286 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000287 * Release the locks corresponding to each affinity level in the
288 * reverse order to which they were acquired.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100289 */
Achin Gupta0959db52013-12-02 17:33:04 +0000290 psci_release_afflvl_locks(mpidr,
291 start_afflvl,
292 end_afflvl,
293 mpidr_nodes);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100294
Achin Gupta4f6ad662013-10-25 09:08:21 +0100295 return rc;
296}