/*
 * Copyright (c) 2013, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <psci.h>
#include <psci_private.h>

typedef int (*afflvl_suspend_handler)(unsigned long,
				      aff_map_node *,
				      unsigned long,
				      unsigned long,
				      unsigned int);

/*******************************************************************************
 * The next three functions implement a handler for each supported affinity
 * level which is called when that affinity level is about to be suspended.
 ******************************************************************************/
static int psci_afflvl0_suspend(unsigned long mpidr,
				aff_map_node *cpu_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	unsigned int index, plat_state;
	unsigned long psci_entrypoint, sctlr = read_sctlr();
	int rc = PSCI_E_SUCCESS;

	/* Sanity check to safeguard against data corruption */
	assert(cpu_node->level == MPIDR_AFFLVL0);

	/*
	 * Generic management: Store the re-entry information for the
	 * non-secure world
	 */
	index = cpu_node->data;
	rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * Arch. management: Save the secure context, flush the
	 * L1 caches and exit intra-cluster coherency et al
	 */
	psci_secure_context[index].sctlr = read_sctlr();
	psci_secure_context[index].scr = read_scr();
	psci_secure_context[index].cptr = read_cptr();
	psci_secure_context[index].cpacr = read_cpacr();
	psci_secure_context[index].cntfrq = read_cntfrq_el0();
	psci_secure_context[index].mair = read_mair();
	psci_secure_context[index].tcr = read_tcr();
	psci_secure_context[index].ttbr = read_ttbr0();
	psci_secure_context[index].vbar = read_vbar();
	psci_secure_context[index].pstate =
		read_daif() & (DAIF_ABT_BIT | DAIF_DBG_BIT);

	/* Set the secure world (EL3) re-entry point after BL1 */
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;

	/*
	 * Arch. management: Perform the necessary steps to flush all
	 * cpu caches.
	 *
	 * TODO: This power down sequence varies across cpus so it needs to be
	 * abstracted out on the basis of the MIDR like in cpu_reset_handler().
	 * Do the bare minimum for the time being. Fix this before porting to
	 * Cortex models.
	 */
	sctlr &= ~SCTLR_C_BIT;
	write_sctlr(sctlr);
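
	/*
	 * A hypothetical shape for the MIDR-based abstraction the TODO
	 * above asks for, sketched for illustration only. cpu_ops,
	 * get_cpu_ops() and MIDR_PN() are assumed names and are not part
	 * of this codebase:
	 *
	 *	struct cpu_ops *ops = get_cpu_ops(MIDR_PN(read_midr()));
	 *	ops->prepare_core_pwr_dwn();
	 */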

	/*
	 * CAUTION: This flush to the level of unification makes an assumption
	 * about the cache hierarchy at affinity level 0 (cpu) in the platform.
	 * Ideally the platform should tell psci which levels to flush to exit
	 * coherency.
	 */
	dcsw_op_louis(DCCISW);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	if (psci_plat_pm_ops->affinst_suspend) {
		plat_state = psci_get_aff_phys_state(cpu_node);
		rc = psci_plat_pm_ops->affinst_suspend(mpidr,
						       psci_entrypoint,
						       ns_entrypoint,
						       cpu_node->level,
						       plat_state);
	}

	return rc;
}

static int psci_afflvl1_suspend(unsigned long mpidr,
				aff_map_node *cluster_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int plat_state;
	unsigned long psci_entrypoint;

	/* Sanity check the cluster level */
	assert(cluster_node->level == MPIDR_AFFLVL1);

	/*
	 * Keep the physical state of this cluster handy to decide
	 * what action needs to be taken
	 */
	plat_state = psci_get_aff_phys_state(cluster_node);

	/*
	 * Arch. management: Flush all levels of caches to PoC if the
	 * cluster is to be shut down
	 */
	if (plat_state == PSCI_STATE_OFF)
		dcsw_op_all(DCCISW);

	/*
	 * Plat. management: Allow the platform to do its cluster-specific
	 * bookkeeping e.g. turn off interconnect coherency,
	 * program the power controller etc.
	 */
	if (psci_plat_pm_ops->affinst_suspend) {

		/*
		 * Sending the psci entrypoint is currently redundant
		 * beyond affinity level 0 but one never knows what a
		 * platform might do. Also it allows us to keep the
		 * platform handler prototype the same.
		 */
		psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;

		rc = psci_plat_pm_ops->affinst_suspend(mpidr,
						       psci_entrypoint,
						       ns_entrypoint,
						       cluster_node->level,
						       plat_state);
	}

	return rc;
}

static int psci_afflvl2_suspend(unsigned long mpidr,
				aff_map_node *system_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int plat_state;
	unsigned long psci_entrypoint;

	/* Cannot go beyond this */
	assert(system_node->level == MPIDR_AFFLVL2);

	/*
	 * Keep the physical state of the system handy to decide what
	 * action needs to be taken
	 */
	plat_state = psci_get_aff_phys_state(system_node);

	/*
	 * Plat. management: Allow the platform to do its bookkeeping
	 * at this affinity level
	 */
	if (psci_plat_pm_ops->affinst_suspend) {

		/*
		 * Sending the psci entrypoint is currently redundant
		 * beyond affinity level 0 but one never knows what a
		 * platform might do. Also it allows us to keep the
		 * platform handler prototype the same.
		 */
		psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;

		rc = psci_plat_pm_ops->affinst_suspend(mpidr,
						       psci_entrypoint,
						       ns_entrypoint,
						       system_node->level,
						       plat_state);
	}

	return rc;
}

static const afflvl_suspend_handler psci_afflvl_suspend_handlers[] = {
	psci_afflvl0_suspend,
	psci_afflvl1_suspend,
	psci_afflvl2_suspend,
};

/*******************************************************************************
 * This function implements the core of the processing required to suspend a
 * cpu. It is assumed that along with suspending the cpu, higher affinity
 * levels will be suspended as far as possible. Suspending a cpu is equivalent
 * to physically powering it down, but unlike a cpu that has been turned off,
 * it remains available to the OS for scheduling since it will resume execution
 * when woken. We first need to determine the new state of all the affinity
 * instances in the mpidr corresponding to the target cpu. Action will be taken
 * on the basis of this new state. To do the state change we first need to
 * acquire the locks for all the implemented affinity levels to be able to
 * snapshot the system state. Then we need to start suspending affinity levels
 * from the lowest to the highest (e.g. a cpu needs to be suspended before a
 * cluster can be). To achieve this flow, we start acquiring the locks from the
 * highest to the lowest affinity level. Once we reach affinity level 0, we do
 * the state change followed by the actions corresponding to the new state for
 * affinity level 0. Actions as per the updated state for higher affinity
 * levels are performed as we unwind back to the highest affinity level.
 ******************************************************************************/
int psci_afflvl_suspend(unsigned long mpidr,
			unsigned long entrypoint,
			unsigned long context_id,
			unsigned int power_state,
			int cur_afflvl,
			int tgt_afflvl)
{
	int rc = PSCI_E_SUCCESS, level;
	unsigned int prev_state, next_state;
	aff_map_node *aff_node;

	mpidr &= MPIDR_AFFINITY_MASK;

	/*
	 * Some affinity instances at levels between the current and
	 * target levels could be absent in the mpidr. Skip them and
	 * start from the first present instance.
	 */
	level = psci_get_first_present_afflvl(mpidr,
					      cur_afflvl,
					      tgt_afflvl,
					      &aff_node);

	/*
	 * Return if there are no more affinity instances beyond this
	 * level to process. Else ensure that the returned affinity
	 * node makes sense.
	 */
	if (aff_node == NULL)
		return rc;

	assert(level == aff_node->level);

	/*
	 * This function acquires the lock corresponding to each
	 * affinity level so that state management can be done safely.
	 */
	bakery_lock_get(mpidr, &aff_node->lock);

	/* Keep the old state and the next one handy */
	prev_state = psci_get_state(aff_node->state);
	next_state = PSCI_STATE_SUSPEND;

	/*
	 * We start from the highest affinity level and work our way
	 * downwards to the lowest i.e. MPIDR_AFFLVL0.
	 */
	if (aff_node->level == tgt_afflvl) {
		psci_change_state(mpidr,
				  tgt_afflvl,
				  get_max_afflvl(),
				  next_state);
	} else {
		rc = psci_afflvl_suspend(mpidr,
					 entrypoint,
					 context_id,
					 power_state,
					 level - 1,
					 tgt_afflvl);
		if (rc != PSCI_E_SUCCESS) {
			psci_set_state(aff_node->state, prev_state);
			goto exit;
		}
	}

	/*
	 * Perform generic, architecture and platform specific
	 * handling
	 */
	rc = psci_afflvl_suspend_handlers[level](mpidr,
						 aff_node,
						 entrypoint,
						 context_id,
						 power_state);
	if (rc != PSCI_E_SUCCESS) {
		psci_set_state(aff_node->state, prev_state);
		goto exit;
	}

	/*
	 * If all has gone as per plan then this cpu should be
	 * marked as suspended
	 */
	if (level == MPIDR_AFFLVL0) {
		next_state = psci_get_state(aff_node->state);
		assert(next_state == PSCI_STATE_SUSPEND);
	}

exit:
	bakery_lock_release(mpidr, &aff_node->lock);
	return rc;
}
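
/*
 * Illustrative usage, a sketch rather than code from this file: a
 * CPU_SUSPEND SMC handler would start the recursion at the highest
 * implemented affinity level and target the cpu level, with
 * ns_entrypoint, context_id and power_state taken from the SMC
 * arguments:
 *
 *	rc = psci_afflvl_suspend(read_mpidr() & MPIDR_AFFINITY_MASK,
 *				 ns_entrypoint, context_id, power_state,
 *				 get_max_afflvl(), MPIDR_AFFLVL0);
 */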

/*******************************************************************************
 * The following functions finish an earlier affinity suspend request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
						aff_map_node *cpu_node,
						unsigned int prev_state)
{
	unsigned int index, plat_state, rc = 0;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	if (psci_plat_pm_ops->affinst_suspend_finish) {
		plat_state = psci_get_phys_state(prev_state);
		rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
							      cpu_node->level,
							      plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	/* Get the index for restoring the re-entry information */
	index = cpu_node->data;

	/*
	 * Arch. management: Restore the stashed secure architectural
	 * context in the right order.
	 */
	write_vbar(psci_secure_context[index].vbar);
	write_daif(read_daif() | psci_secure_context[index].pstate);
	write_mair(psci_secure_context[index].mair);
	write_tcr(psci_secure_context[index].tcr);
	write_ttbr0(psci_secure_context[index].ttbr);
	write_sctlr(psci_secure_context[index].sctlr);

	/* MMU and coherency should be enabled by now */
	write_scr(psci_secure_context[index].scr);
	write_cptr(psci_secure_context[index].cptr);
	write_cpacr(psci_secure_context[index].cpacr);
	write_cntfrq_el0(psci_secure_context[index].cntfrq);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	psci_get_ns_entry_info(index);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);

	return rc;
}

static unsigned int psci_afflvl1_suspend_finish(unsigned long mpidr,
						aff_map_node *cluster_node,
						unsigned int prev_state)
{
	unsigned int rc = 0;
	unsigned int plat_state;

	assert(cluster_node->level == MPIDR_AFFLVL1);

	/*
	 * Plat. management: Perform the platform specific actions
	 * as per the old state of the cluster e.g. enabling
	 * coherency at the interconnect depends upon the state with
	 * which this cluster was powered up. If anything goes wrong
	 * then assert as there is no way to recover from this
	 * situation.
	 */
	if (psci_plat_pm_ops->affinst_suspend_finish) {
		plat_state = psci_get_phys_state(prev_state);
		rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
							      cluster_node->level,
							      plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	return rc;
}

static unsigned int psci_afflvl2_suspend_finish(unsigned long mpidr,
						aff_map_node *system_node,
						unsigned int prev_state)
{
	unsigned int rc = 0;
	unsigned int plat_state;

	/* Cannot go beyond this affinity level */
	assert(system_node->level == MPIDR_AFFLVL2);

	/*
	 * Currently, there are no architectural actions to perform
	 * at the system level.
	 */

	/*
	 * Plat. management: Perform the platform specific actions
	 * as per the old state of the system e.g. enabling
	 * coherency at the interconnect depends upon the state with
	 * which the system was powered up. If anything goes wrong
	 * then assert as there is no way to recover from this
	 * situation.
	 */
	if (psci_plat_pm_ops->affinst_suspend_finish) {
		plat_state = psci_get_phys_state(prev_state);
		rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
							      system_node->level,
							      plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	return rc;
}

const afflvl_power_on_finisher psci_afflvl_suspend_finishers[] = {
	psci_afflvl0_suspend_finish,
	psci_afflvl1_suspend_finish,
	psci_afflvl2_suspend_finish,
};

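/*
 * Illustrative dispatch, a sketch rather than code from psci_common.c:
 * the common finisher is expected to hand each level's affinity node and
 * its pre-suspend state to the matching entry in the table above:
 *
 *	rc = psci_afflvl_suspend_finishers[node->level](mpidr,
 *							node,
 *							prev_state);
 *	assert(rc == PSCI_E_SUCCESS);
 */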