blob: ea9038963ee30de9c275bd97cc2f2c6682cb8438 [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dan Handleye83b0ca2014-01-14 18:17:09 +00002 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
Achin Gupta4f6ad662013-10-25 09:08:21 +010031#include <assert.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010032#include <bl_common.h>
33#include <arch.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010034#include <arch_helpers.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010035#include <context.h>
Achin Guptaef7a28c2014-02-01 08:59:56 +000036#include <context_mgmt.h>
Dan Handleybcd60ba2014-04-17 18:53:42 +010037#include <runtime_svc.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010038#include <stddef.h>
Dan Handley714a0d22014-04-09 13:13:04 +010039#include "psci_private.h"
Achin Gupta4f6ad662013-10-25 09:08:21 +010040
/*
 * Prototype of the per-affinity-level suspend handlers below. Arguments are,
 * in order: target mpidr, the affinity-instance node for that level, the
 * non-secure entrypoint, the context id and the PSCI power_state parameter.
 */
typedef int (*afflvl_suspend_handler_t)(unsigned long,
					aff_map_node_t *,
					unsigned long,
					unsigned long,
					unsigned int);
46
47/*******************************************************************************
Vikram Kanigirif100f412014-04-01 19:26:26 +010048 * This function sets the power state of the current cpu while
49 * powering down during a cpu_suspend call
Achin Guptaa45e3972013-12-05 15:10:48 +000050 ******************************************************************************/
Dan Handleye2712bc2014-04-10 15:37:22 +010051void psci_set_suspend_power_state(aff_map_node_t *node, unsigned int power_state)
Achin Guptaa45e3972013-12-05 15:10:48 +000052{
53 /*
54 * Check that nobody else is calling this function on our behalf &
55 * this information is being set only in the cpu node
56 */
57 assert(node->mpidr == (read_mpidr() & MPIDR_AFFINITY_MASK));
58 assert(node->level == MPIDR_AFFLVL0);
59
60 /*
Andrew Thoelkee9a0d112014-06-20 00:38:03 +010061 * Save PSCI power state parameter for the core in suspend context.
62 * The node is in always-coherent RAM so it does not need to be flushed
Achin Guptaa45e3972013-12-05 15:10:48 +000063 */
Andrew Thoelkee9a0d112014-06-20 00:38:03 +010064 node->power_state = power_state;
Achin Guptaa45e3972013-12-05 15:10:48 +000065}
66
67/*******************************************************************************
Vikram Kanigirif100f412014-04-01 19:26:26 +010068 * This function gets the affinity level till which a cpu is powered down
69 * during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
70 * power state saved for the node is invalid
71 ******************************************************************************/
72int psci_get_suspend_afflvl(unsigned long mpidr)
73{
Dan Handleye2712bc2014-04-10 15:37:22 +010074 aff_map_node_t *node;
Vikram Kanigirif100f412014-04-01 19:26:26 +010075
76 node = psci_get_aff_map_node(mpidr & MPIDR_AFFINITY_MASK,
77 MPIDR_AFFLVL0);
78 assert(node);
79
80 return psci_get_aff_map_node_suspend_afflvl(node);
81}
82
83
84/*******************************************************************************
Achin Guptaa45e3972013-12-05 15:10:48 +000085 * This function gets the affinity level till which the current cpu was powered
Vikram Kanigirif100f412014-04-01 19:26:26 +010086 * down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
87 * power state saved for the node is invalid
88 ******************************************************************************/
Dan Handleye2712bc2014-04-10 15:37:22 +010089int psci_get_aff_map_node_suspend_afflvl(aff_map_node_t *node)
Vikram Kanigirif100f412014-04-01 19:26:26 +010090{
91 unsigned int power_state;
92
93 assert(node->level == MPIDR_AFFLVL0);
94
Andrew Thoelkee9a0d112014-06-20 00:38:03 +010095 power_state = node->power_state;
Vikram Kanigirif100f412014-04-01 19:26:26 +010096 return ((power_state == PSCI_INVALID_DATA) ?
97 power_state : psci_get_pstate_afflvl(power_state));
98}
99
100/*******************************************************************************
101 * This function gets the state id of a cpu stored in suspend context
102 * while powering down during a cpu_suspend call. Returns 0xFFFFFFFF
103 * if the power state saved for the node is invalid
Achin Guptaa45e3972013-12-05 15:10:48 +0000104 ******************************************************************************/
Vikram Kanigirif100f412014-04-01 19:26:26 +0100105int psci_get_suspend_stateid(unsigned long mpidr)
Achin Guptaa45e3972013-12-05 15:10:48 +0000106{
Dan Handleye2712bc2014-04-10 15:37:22 +0100107 aff_map_node_t *node;
Vikram Kanigirif100f412014-04-01 19:26:26 +0100108 unsigned int power_state;
109
110 node = psci_get_aff_map_node(mpidr & MPIDR_AFFINITY_MASK,
111 MPIDR_AFFLVL0);
112 assert(node);
113 assert(node->level == MPIDR_AFFLVL0);
114
Andrew Thoelkee9a0d112014-06-20 00:38:03 +0100115 power_state = node->power_state;
Vikram Kanigirif100f412014-04-01 19:26:26 +0100116 return ((power_state == PSCI_INVALID_DATA) ?
117 power_state : psci_get_pstate_id(power_state));
Achin Guptaa45e3972013-12-05 15:10:48 +0000118}
119
/*******************************************************************************
 * The next three functions implement a handler for each supported affinity
 * level which is called when that affinity level is about to be suspended.
 *
 * NOTE: the statement order in this cpu-level handler is load-bearing: the
 * non-secure re-entry info and EL3 context must be saved and flushed BEFORE
 * the data cache is disabled and flushed, and the platform power-down hook
 * runs last. Do not reorder.
 ******************************************************************************/
static int psci_afflvl0_suspend(unsigned long mpidr,
				aff_map_node_t *cpu_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	unsigned int plat_state;
	unsigned long psci_entrypoint, sctlr;
	el3_state_t *saved_el3_state;
	/*
	 * Capture the non-secure world's SCR_EL3/SCTLR_EL1 up front, before
	 * anything below can change them; they are handed to
	 * psci_save_ns_entry() for use on resume.
	 */
	uint32_t ns_scr_el3 = read_scr_el3();
	uint32_t ns_sctlr_el1 = read_sctlr_el1();
	int rc;

	/* Sanity check to safeguard against data corruption */
	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Save PSCI power state parameter for the core in suspend context */
	psci_set_suspend_power_state(cpu_node, power_state);

	/*
	 * Generic management: Store the re-entry information for the non-secure
	 * world and allow the secure world to suspend itself
	 */

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(power_state);

	/* State management: mark this cpu as suspended */
	psci_set_state(cpu_node, PSCI_STATE_SUSPEND);

	/*
	 * Generic management: Store the re-entry information for the
	 * non-secure world
	 */
	rc = psci_save_ns_entry(read_mpidr_el1(), ns_entrypoint, context_id,
				ns_scr_el3, ns_sctlr_el1);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * Arch. management: Save the EL3 state in the 'cpu_context'
	 * structure that has been allocated for this cpu, flush the
	 * L1 caches and exit intra-cluster coherency et al
	 */
	cm_el3_sysregs_context_save(NON_SECURE);

	/*
	 * Flush the EL3 state to PoC since it will be accessed after a
	 * reset with the caches turned off
	 */
	saved_el3_state = get_el3state_ctx(cm_get_context(NON_SECURE));
	flush_dcache_range((uint64_t) saved_el3_state, sizeof(*saved_el3_state));

	/* Set the secure world (EL3) re-entry point after BL1 */
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;

	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches. Disabling the dcache (SCTLR_EL3.C) first ensures no
	 * new dirty lines are allocated while the flush below runs.
	 *
	 * TODO: This power down sequence varies across cpus so it needs to be
	 * abstracted out on the basis of the MIDR like in cpu_reset_handler().
	 * Do the bare minimal for the time being. Fix this before porting to
	 * Cortex models.
	 */
	sctlr = read_sctlr_el3();
	sctlr &= ~SCTLR_C_BIT;
	write_sctlr_el3(sctlr);
	isb();	/* ensure MMU disable takes immediate effect */

	/*
	 * CAUTION: This flush to the level of unification makes an assumption
	 * about the cache hierarchy at affinity level 0 (cpu) in the platform.
	 * Ideally the platform should tell psci which levels to flush to exit
	 * coherency.
	 */
	dcsw_op_louis(DCCISW);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	rc = PSCI_E_SUCCESS;

	if (psci_plat_pm_ops->affinst_suspend) {
		plat_state = psci_get_phys_state(cpu_node);
		rc = psci_plat_pm_ops->affinst_suspend(mpidr,
						       psci_entrypoint,
						       ns_entrypoint,
						       cpu_node->level,
						       plat_state);
	}

	return rc;
}
226
227static int psci_afflvl1_suspend(unsigned long mpidr,
Dan Handleye2712bc2014-04-10 15:37:22 +0100228 aff_map_node_t *cluster_node,
Achin Gupta4f6ad662013-10-25 09:08:21 +0100229 unsigned long ns_entrypoint,
230 unsigned long context_id,
231 unsigned int power_state)
232{
233 int rc = PSCI_E_SUCCESS;
234 unsigned int plat_state;
235 unsigned long psci_entrypoint;
236
237 /* Sanity check the cluster level */
238 assert(cluster_node->level == MPIDR_AFFLVL1);
239
Achin Gupta75f73672013-12-05 16:33:10 +0000240 /* State management: Decrement the cluster reference count */
241 psci_set_state(cluster_node, PSCI_STATE_SUSPEND);
242
Achin Gupta4f6ad662013-10-25 09:08:21 +0100243 /*
244 * Keep the physical state of this cluster handy to decide
245 * what action needs to be taken
246 */
Achin Gupta75f73672013-12-05 16:33:10 +0000247 plat_state = psci_get_phys_state(cluster_node);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100248
249 /*
250 * Arch. management: Flush all levels of caches to PoC if the
251 * cluster is to be shutdown
252 */
253 if (plat_state == PSCI_STATE_OFF)
254 dcsw_op_all(DCCISW);
255
256 /*
Achin Gupta3140a9e2013-12-02 16:23:12 +0000257 * Plat. Management. Allow the platform to do its cluster
Achin Gupta4f6ad662013-10-25 09:08:21 +0100258 * specific bookeeping e.g. turn off interconnect coherency,
259 * program the power controller etc.
260 */
261 if (psci_plat_pm_ops->affinst_suspend) {
262
263 /*
264 * Sending the psci entrypoint is currently redundant
265 * beyond affinity level 0 but one never knows what a
266 * platform might do. Also it allows us to keep the
267 * platform handler prototype the same.
268 */
269 psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100270 rc = psci_plat_pm_ops->affinst_suspend(mpidr,
271 psci_entrypoint,
272 ns_entrypoint,
273 cluster_node->level,
274 plat_state);
275 }
276
277 return rc;
278}
279
280
281static int psci_afflvl2_suspend(unsigned long mpidr,
Dan Handleye2712bc2014-04-10 15:37:22 +0100282 aff_map_node_t *system_node,
Achin Gupta4f6ad662013-10-25 09:08:21 +0100283 unsigned long ns_entrypoint,
284 unsigned long context_id,
285 unsigned int power_state)
286{
287 int rc = PSCI_E_SUCCESS;
288 unsigned int plat_state;
289 unsigned long psci_entrypoint;
290
291 /* Cannot go beyond this */
292 assert(system_node->level == MPIDR_AFFLVL2);
293
Achin Gupta75f73672013-12-05 16:33:10 +0000294 /* State management: Decrement the system reference count */
295 psci_set_state(system_node, PSCI_STATE_SUSPEND);
296
Achin Gupta4f6ad662013-10-25 09:08:21 +0100297 /*
298 * Keep the physical state of the system handy to decide what
299 * action needs to be taken
300 */
Achin Gupta75f73672013-12-05 16:33:10 +0000301 plat_state = psci_get_phys_state(system_node);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100302
303 /*
Achin Gupta3140a9e2013-12-02 16:23:12 +0000304 * Plat. Management : Allow the platform to do its bookeeping
Achin Gupta4f6ad662013-10-25 09:08:21 +0100305 * at this affinity level
306 */
307 if (psci_plat_pm_ops->affinst_suspend) {
308
309 /*
310 * Sending the psci entrypoint is currently redundant
311 * beyond affinity level 0 but one never knows what a
312 * platform might do. Also it allows us to keep the
313 * platform handler prototype the same.
314 */
315 psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100316 rc = psci_plat_pm_ops->affinst_suspend(mpidr,
317 psci_entrypoint,
318 ns_entrypoint,
319 system_node->level,
320 plat_state);
321 }
322
323 return rc;
324}
325
/* Suspend handlers, indexed by affinity level (0 = cpu, 1 = cluster, 2 = system) */
static const afflvl_suspend_handler_t psci_afflvl_suspend_handlers[] = {
	psci_afflvl0_suspend,
	psci_afflvl1_suspend,
	psci_afflvl2_suspend,
};
331
332/*******************************************************************************
Achin Gupta0959db52013-12-02 17:33:04 +0000333 * This function takes an array of pointers to affinity instance nodes in the
334 * topology tree and calls the suspend handler for the corresponding affinity
335 * levels
336 ******************************************************************************/
Dan Handleye2712bc2014-04-10 15:37:22 +0100337static int psci_call_suspend_handlers(mpidr_aff_map_nodes_t mpidr_nodes,
Achin Gupta0959db52013-12-02 17:33:04 +0000338 int start_afflvl,
339 int end_afflvl,
340 unsigned long mpidr,
341 unsigned long entrypoint,
342 unsigned long context_id,
343 unsigned int power_state)
344{
345 int rc = PSCI_E_INVALID_PARAMS, level;
Dan Handleye2712bc2014-04-10 15:37:22 +0100346 aff_map_node_t *node;
Achin Gupta0959db52013-12-02 17:33:04 +0000347
348 for (level = start_afflvl; level <= end_afflvl; level++) {
349 node = mpidr_nodes[level];
350 if (node == NULL)
351 continue;
352
353 /*
354 * TODO: In case of an error should there be a way
355 * of restoring what we might have torn down at
356 * lower affinity levels.
357 */
358 rc = psci_afflvl_suspend_handlers[level](mpidr,
359 node,
360 entrypoint,
361 context_id,
362 power_state);
363 if (rc != PSCI_E_SUCCESS)
364 break;
365 }
366
367 return rc;
368}
369
370/*******************************************************************************
371 * Top level handler which is called when a cpu wants to suspend its execution.
372 * It is assumed that along with turning the cpu off, higher affinity levels
373 * until the target affinity level will be turned off as well. It traverses
374 * through all the affinity levels performing generic, architectural, platform
375 * setup and state management e.g. for a cluster that's to be suspended, it will
376 * call the platform specific code which will disable coherency at the
377 * interconnect level if the cpu is the last in the cluster. For a cpu it could
378 * mean programming the power controller etc.
379 *
380 * The state of all the relevant affinity levels is changed prior to calling the
381 * affinity level specific handlers as their actions would depend upon the state
382 * the affinity level is about to enter.
383 *
384 * The affinity level specific handlers are called in ascending order i.e. from
385 * the lowest to the highest affinity level implemented by the platform because
386 * to turn off affinity level X it is neccesary to turn off affinity level X - 1
387 * first.
388 *
389 * CAUTION: This function is called with coherent stacks so that coherency can
390 * be turned off and caches can be flushed safely.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100391 ******************************************************************************/
392int psci_afflvl_suspend(unsigned long mpidr,
393 unsigned long entrypoint,
394 unsigned long context_id,
395 unsigned int power_state,
Achin Gupta0959db52013-12-02 17:33:04 +0000396 int start_afflvl,
397 int end_afflvl)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100398{
Achin Gupta0959db52013-12-02 17:33:04 +0000399 int rc = PSCI_E_SUCCESS;
Dan Handleye2712bc2014-04-10 15:37:22 +0100400 mpidr_aff_map_nodes_t mpidr_nodes;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100401
402 mpidr &= MPIDR_AFFINITY_MASK;
403
404 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000405 * Collect the pointers to the nodes in the topology tree for
406 * each affinity instance in the mpidr. If this function does
407 * not return successfully then either the mpidr or the affinity
408 * levels are incorrect.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100409 */
Achin Gupta0959db52013-12-02 17:33:04 +0000410 rc = psci_get_aff_map_nodes(mpidr,
411 start_afflvl,
412 end_afflvl,
413 mpidr_nodes);
414 if (rc != PSCI_E_SUCCESS)
415 return rc;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100416
417 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000418 * This function acquires the lock corresponding to each affinity
419 * level so that by the time all locks are taken, the system topology
420 * is snapshot and state management can be done safely.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100421 */
Achin Gupta0959db52013-12-02 17:33:04 +0000422 psci_acquire_afflvl_locks(mpidr,
423 start_afflvl,
424 end_afflvl,
425 mpidr_nodes);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100426
Achin Gupta0959db52013-12-02 17:33:04 +0000427 /* Perform generic, architecture and platform specific handling */
428 rc = psci_call_suspend_handlers(mpidr_nodes,
429 start_afflvl,
430 end_afflvl,
431 mpidr,
432 entrypoint,
433 context_id,
434 power_state);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100435
436 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000437 * Release the locks corresponding to each affinity level in the
438 * reverse order to which they were acquired.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100439 */
Achin Gupta0959db52013-12-02 17:33:04 +0000440 psci_release_afflvl_locks(mpidr,
441 start_afflvl,
442 end_afflvl,
443 mpidr_nodes);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100444
Achin Gupta4f6ad662013-10-25 09:08:21 +0100445 return rc;
446}
447
/*******************************************************************************
 * The following functions finish an earlier affinity suspend request. They
 * are called by the common finisher routine in psci_common.c.
 *
 * NOTE: the order here mirrors (in reverse) the suspend path above: platform
 * finish hook first, then EL3 context restore, then the SPD's finish hook,
 * then preparation for EL3 exit. Do not reorder.
 ******************************************************************************/
static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
						aff_map_node_t *cpu_node)
{
	unsigned int plat_state, state, rc;
	int32_t suspend_level;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Ensure we have been woken up from a suspended state */
	state = psci_get_state(cpu_node);
	assert(state == PSCI_STATE_SUSPEND);

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	if (psci_plat_pm_ops->affinst_suspend_finish) {

		/* Get the physical state of this cpu */
		plat_state = get_phys_state(state);
		rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
							      cpu_node->level,
							      plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	/*
	 * Arch. management: Restore the stashed EL3 architectural
	 * context from the 'cpu_context' structure for this cpu.
	 */
	cm_el3_sysregs_context_restore(NON_SECURE);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend) {
		suspend_level = psci_get_aff_map_node_suspend_afflvl(cpu_node);
		assert (suspend_level != PSCI_INVALID_DATA);
		psci_spd_pm->svc_suspend_finish(suspend_level);
	}

	/* Invalidate the suspend context for the node */
	psci_set_suspend_power_state(cpu_node, PSCI_INVALID_DATA);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* State management: mark this cpu as on */
	psci_set_state(cpu_node, PSCI_STATE_ON);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);

	rc = PSCI_E_SUCCESS;
	return rc;
}
518
519static unsigned int psci_afflvl1_suspend_finish(unsigned long mpidr,
Dan Handleye2712bc2014-04-10 15:37:22 +0100520 aff_map_node_t *cluster_node)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100521{
Achin Gupta0959db52013-12-02 17:33:04 +0000522 unsigned int plat_state, rc = PSCI_E_SUCCESS;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100523
524 assert(cluster_node->level == MPIDR_AFFLVL1);
525
526 /*
527 * Plat. management: Perform the platform specific actions
528 * as per the old state of the cluster e.g. enabling
529 * coherency at the interconnect depends upon the state with
530 * which this cluster was powered up. If anything goes wrong
531 * then assert as there is no way to recover from this
532 * situation.
533 */
534 if (psci_plat_pm_ops->affinst_suspend_finish) {
Achin Gupta0959db52013-12-02 17:33:04 +0000535
536 /* Get the physical state of this cpu */
Achin Gupta75f73672013-12-05 16:33:10 +0000537 plat_state = psci_get_phys_state(cluster_node);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100538 rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
539 cluster_node->level,
540 plat_state);
541 assert(rc == PSCI_E_SUCCESS);
542 }
543
Achin Gupta75f73672013-12-05 16:33:10 +0000544 /* State management: Increment the cluster reference count */
545 psci_set_state(cluster_node, PSCI_STATE_ON);
546
Achin Gupta4f6ad662013-10-25 09:08:21 +0100547 return rc;
548}
549
550
551static unsigned int psci_afflvl2_suspend_finish(unsigned long mpidr,
Dan Handleye2712bc2014-04-10 15:37:22 +0100552 aff_map_node_t *system_node)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100553{
Achin Gupta0959db52013-12-02 17:33:04 +0000554 unsigned int plat_state, rc = PSCI_E_SUCCESS;;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100555
556 /* Cannot go beyond this affinity level */
557 assert(system_node->level == MPIDR_AFFLVL2);
558
559 /*
560 * Currently, there are no architectural actions to perform
561 * at the system level.
562 */
563
564 /*
565 * Plat. management: Perform the platform specific actions
566 * as per the old state of the cluster e.g. enabling
567 * coherency at the interconnect depends upon the state with
568 * which this cluster was powered up. If anything goes wrong
569 * then assert as there is no way to recover from this
570 * situation.
571 */
572 if (psci_plat_pm_ops->affinst_suspend_finish) {
Achin Gupta0959db52013-12-02 17:33:04 +0000573
574 /* Get the physical state of the system */
Achin Gupta75f73672013-12-05 16:33:10 +0000575 plat_state = psci_get_phys_state(system_node);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100576 rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
577 system_node->level,
578 plat_state);
579 assert(rc == PSCI_E_SUCCESS);
580 }
581
Achin Gupta75f73672013-12-05 16:33:10 +0000582 /* State management: Increment the system reference count */
583 psci_set_state(system_node, PSCI_STATE_ON);
584
Achin Gupta4f6ad662013-10-25 09:08:21 +0100585 return rc;
586}
587
/* Suspend finishers, indexed by affinity level (0 = cpu, 1 = cluster, 2 = system) */
const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = {
	psci_afflvl0_suspend_finish,
	psci_afflvl1_suspend_finish,
	psci_afflvl2_suspend_finish,
};
593