/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <bl_common.h>
#include <arch.h>
#include <arch_helpers.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"

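/*
 * Type of the per-affinity-level suspend handlers defined in this file. Each
 * handler is passed the affinity instance node being suspended, the
 * non-secure entrypoint and context id to use on resume, and the PSCI
 * power_state parameter.
 */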
typedef int (*afflvl_suspend_handler_t)(aff_map_node_t *,
                                        unsigned long,
                                        unsigned long,
                                        unsigned int);

/*******************************************************************************
 * This function saves the power state parameter passed in the current PSCI
 * cpu_suspend call in the per-cpu data array.
 ******************************************************************************/
void psci_set_suspend_power_state(unsigned int power_state)
{
        set_cpu_data(psci_svc_cpu_data.power_state, power_state);
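        /*
         * Flush the saved value so that it survives this cpu's caches being
         * turned off during the powerdown sequence and remains visible to
         * other cpus reading it e.g. via get_cpu_data_by_mpidr().
         */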
        flush_cpu_data(psci_svc_cpu_data.power_state);
}

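/*
 * Note: the accessors below assume the PSCI v0.2 'power_state' encoding, in
 * which the state id, state type and target affinity level occupy separate
 * bit fields; extraction of the individual fields is delegated to the
 * psci_get_pstate_afflvl() and psci_get_pstate_id() helpers.
 */
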
/*******************************************************************************
 * This function gets the affinity level up to which the current cpu could be
 * powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
 * saved power state is invalid.
 ******************************************************************************/
int psci_get_suspend_afflvl(void)
{
        unsigned int power_state;

        power_state = get_cpu_data(psci_svc_cpu_data.power_state);

        return ((power_state == PSCI_INVALID_DATA) ?
                power_state : psci_get_pstate_afflvl(power_state));
}

/*******************************************************************************
 * This function gets the state id of the current cpu from the power state
 * parameter saved in the per-cpu data array. Returns PSCI_INVALID_DATA if the
 * saved power state is invalid.
 ******************************************************************************/
int psci_get_suspend_stateid(void)
{
        unsigned int power_state;

        power_state = get_cpu_data(psci_svc_cpu_data.power_state);

        return ((power_state == PSCI_INVALID_DATA) ?
                power_state : psci_get_pstate_id(power_state));
}

/*******************************************************************************
 * This function gets the state id of the cpu specified by the 'mpidr'
 * parameter from the power state parameter saved in the per-cpu data array.
 * Returns PSCI_INVALID_DATA if the saved power state is invalid.
 ******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
        unsigned int power_state;

        power_state = get_cpu_data_by_mpidr(mpidr,
                                            psci_svc_cpu_data.power_state);

        return ((power_state == PSCI_INVALID_DATA) ?
                power_state : psci_get_pstate_id(power_state));
}

/*******************************************************************************
 * The next three functions implement a handler for each supported affinity
 * level which is called when that affinity level is about to be suspended.
 ******************************************************************************/
static int psci_afflvl0_suspend(aff_map_node_t *cpu_node,
                                unsigned long ns_entrypoint,
                                unsigned long context_id,
                                unsigned int power_state)
{
        unsigned int plat_state;
        unsigned long psci_entrypoint;
        uint32_t ns_scr_el3 = read_scr_el3();
        uint32_t ns_sctlr_el1 = read_sctlr_el1();
        int rc;

        /* Sanity check to safeguard against data corruption */
        assert(cpu_node->level == MPIDR_AFFLVL0);

        /* Save PSCI power state parameter for the core in suspend context */
        psci_set_suspend_power_state(power_state);

        /*
         * Generic management: Store the re-entry information for the
         * non-secure world and allow the secure world to suspend itself.
         */

        /*
         * Call the cpu suspend handler registered by the Secure Payload
         * Dispatcher to let it do any bookkeeping. If the handler encounters
         * an error, it is expected to assert within.
         */
        if (psci_spd_pm && psci_spd_pm->svc_suspend)
                psci_spd_pm->svc_suspend(power_state);

        /*
         * Generic management: Store the re-entry information for the
         * non-secure world
         */
        rc = psci_save_ns_entry(read_mpidr_el1(), ns_entrypoint, context_id,
                                ns_scr_el3, ns_sctlr_el1);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /* Set the secure world (EL3) re-entry point after BL1 */
        psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;

        /*
         * Arch. management: Perform the necessary steps to flush all
         * cpu caches.
         */
        psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);

        /*
         * Plat. management: Allow the platform to perform the
         * necessary actions to turn off this cpu e.g. set the
         * platform defined mailbox with the psci entrypoint,
         * program the power controller etc.
         */
        rc = PSCI_E_SUCCESS;

        if (psci_plat_pm_ops->affinst_suspend) {
                plat_state = psci_get_phys_state(cpu_node);
                rc = psci_plat_pm_ops->affinst_suspend(read_mpidr_el1(),
                                                       psci_entrypoint,
                                                       ns_entrypoint,
                                                       cpu_node->level,
                                                       plat_state);
        }

        return rc;
}

static int psci_afflvl1_suspend(aff_map_node_t *cluster_node,
                                unsigned long ns_entrypoint,
                                unsigned long context_id,
                                unsigned int power_state)
{
        int rc = PSCI_E_SUCCESS;
        unsigned int plat_state;
        unsigned long psci_entrypoint;

        /* Sanity check the cluster level */
        assert(cluster_node->level == MPIDR_AFFLVL1);

        /*
         * Keep the physical state of this cluster handy to decide
         * what action needs to be taken
         */
        plat_state = psci_get_phys_state(cluster_node);

        /*
         * Arch. management: Flush all levels of caches to PoC if the
         * cluster is to be shut down
         */
        if (plat_state == PSCI_STATE_OFF)
                dcsw_op_all(DCCISW);

        /*
         * Plat. management: Allow the platform to do its cluster
         * specific bookkeeping e.g. turn off interconnect coherency,
         * program the power controller etc.
         */
        if (psci_plat_pm_ops->affinst_suspend) {

                /*
                 * Sending the psci entrypoint is currently redundant
                 * beyond affinity level 0 but one never knows what a
                 * platform might do. Also it allows us to keep the
                 * platform handler prototype the same.
                 */
                psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
                rc = psci_plat_pm_ops->affinst_suspend(read_mpidr_el1(),
                                                       psci_entrypoint,
                                                       ns_entrypoint,
                                                       cluster_node->level,
                                                       plat_state);
        }

        return rc;
}


static int psci_afflvl2_suspend(aff_map_node_t *system_node,
                                unsigned long ns_entrypoint,
                                unsigned long context_id,
                                unsigned int power_state)
{
        int rc = PSCI_E_SUCCESS;
        unsigned int plat_state;
        unsigned long psci_entrypoint;

        /* Cannot go beyond this */
        assert(system_node->level == MPIDR_AFFLVL2);

        /*
         * Keep the physical state of the system handy to decide what
         * action needs to be taken
         */
        plat_state = psci_get_phys_state(system_node);

        /*
         * Plat. management: Allow the platform to do its bookkeeping
         * at this affinity level
         */
        if (psci_plat_pm_ops->affinst_suspend) {

                /*
                 * Sending the psci entrypoint is currently redundant
                 * beyond affinity level 0 but one never knows what a
                 * platform might do. Also it allows us to keep the
                 * platform handler prototype the same.
                 */
                psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
                rc = psci_plat_pm_ops->affinst_suspend(read_mpidr_el1(),
                                                       psci_entrypoint,
                                                       ns_entrypoint,
                                                       system_node->level,
                                                       plat_state);
        }

        return rc;
}

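/* Handlers invoked at each affinity level, indexed by MPIDR_AFFLVL0/1/2 */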
static const afflvl_suspend_handler_t psci_afflvl_suspend_handlers[] = {
        psci_afflvl0_suspend,
        psci_afflvl1_suspend,
        psci_afflvl2_suspend,
};

/*******************************************************************************
 * This function takes an array of pointers to affinity instance nodes in the
 * topology tree and calls the suspend handler for the corresponding affinity
 * levels
 ******************************************************************************/
static int psci_call_suspend_handlers(mpidr_aff_map_nodes_t mpidr_nodes,
                                      int start_afflvl,
                                      int end_afflvl,
                                      unsigned long entrypoint,
                                      unsigned long context_id,
                                      unsigned int power_state)
{
        int rc = PSCI_E_INVALID_PARAMS, level;
        aff_map_node_t *node;

        for (level = start_afflvl; level <= end_afflvl; level++) {
                node = mpidr_nodes[level];
                if (node == NULL)
                        continue;

                /*
                 * TODO: In case of an error, should there be a way of
                 * restoring what we might have torn down at the lower
                 * affinity levels?
                 */
                rc = psci_afflvl_suspend_handlers[level](node,
                                                         entrypoint,
                                                         context_id,
                                                         power_state);
                if (rc != PSCI_E_SUCCESS)
                        break;
        }

        return rc;
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with turning the cpu off, higher affinity levels up
 * to the target affinity level will be turned off as well. It traverses all
 * the affinity levels performing generic, architectural and platform setup and
 * state management e.g. for a cluster that is to be suspended, it will call
 * the platform specific code which will disable coherency at the interconnect
 * level if the cpu is the last in the cluster. For a cpu it could mean
 * programming the power controller etc.
 *
 * The state of all the relevant affinity levels is changed prior to calling
 * the affinity level specific handlers as their actions would depend upon the
 * state the affinity level is about to enter.
 *
 * The affinity level specific handlers are called in ascending order i.e. from
 * the lowest to the highest affinity level implemented by the platform because
 * to turn off affinity level X it is necessary to turn off affinity level
 * X - 1 first.
 ******************************************************************************/
int psci_afflvl_suspend(unsigned long entrypoint,
                        unsigned long context_id,
                        unsigned int power_state,
                        int start_afflvl,
                        int end_afflvl)
{
        int rc = PSCI_E_SUCCESS;
        mpidr_aff_map_nodes_t mpidr_nodes;

        /*
         * Collect the pointers to the nodes in the topology tree for
         * each affinity instance in the mpidr. If this function does
         * not return successfully then either the mpidr or the affinity
         * levels are incorrect.
         */
        rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
                                    start_afflvl,
                                    end_afflvl,
                                    mpidr_nodes);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /*
         * This function acquires the lock corresponding to each affinity
         * level so that by the time all locks are taken, the system topology
         * is effectively snapshotted and state management can be done safely.
         */
        psci_acquire_afflvl_locks(start_afflvl,
                                  end_afflvl,
                                  mpidr_nodes);

        /*
         * This function updates the state of each affinity instance
         * corresponding to the mpidr in the range of affinity levels
         * specified.
         */
        psci_do_afflvl_state_mgmt(start_afflvl,
                                  end_afflvl,
                                  mpidr_nodes,
                                  PSCI_STATE_SUSPEND);

        /* Perform generic, architecture and platform specific handling */
        rc = psci_call_suspend_handlers(mpidr_nodes,
                                        start_afflvl,
                                        end_afflvl,
                                        entrypoint,
                                        context_id,
                                        power_state);

        /*
         * Release the locks corresponding to each affinity level in the
         * reverse order to which they were acquired.
         */
        psci_release_afflvl_locks(start_afflvl,
                                  end_afflvl,
                                  mpidr_nodes);

        return rc;
}

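/*
 * Illustrative caller (an assumption for documentation purposes, not code in
 * this file): the PSCI CPU_SUSPEND SMC handler is expected to validate
 * 'power_state', derive the target affinity level from it and then call the
 * routine above roughly along these lines:
 *
 *     target_afflvl = psci_get_pstate_afflvl(power_state);
 *     rc = psci_afflvl_suspend(entrypoint, context_id, power_state,
 *                              MPIDR_AFFLVL0, target_afflvl);
 */
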
/*******************************************************************************
 * The following functions finish an earlier affinity suspend request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
static unsigned int psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
{
        unsigned int plat_state, state, rc;
        int32_t suspend_level;
        uint64_t counter_freq;

        assert(cpu_node->level == MPIDR_AFFLVL0);

        /* Ensure we have been woken up from a suspended state */
        state = psci_get_state(cpu_node);
        assert(state == PSCI_STATE_SUSPEND);

        /*
         * Plat. management: Perform the platform specific actions
         * before we change the state of the cpu e.g. enabling the
         * gic or zeroing the mailbox register. If anything goes
         * wrong then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_suspend_finish) {

                /* Get the physical state of this cpu */
                plat_state = get_phys_state(state);
                rc = psci_plat_pm_ops->affinst_suspend_finish(read_mpidr_el1(),
                                                              cpu_node->level,
                                                              plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /*
         * Arch. management: Enable the data cache, manage stack memory and
         * restore the stashed EL3 architectural context from the 'cpu_context'
         * structure for this cpu.
         */
        psci_do_pwrup_cache_maintenance();

        /* Re-init the cntfrq_el0 register */
        counter_freq = plat_get_syscnt_freq();
        write_cntfrq_el0(counter_freq);

        /*
         * Call the cpu suspend finish handler registered by the Secure
         * Payload Dispatcher to let it do any bookkeeping. If the handler
         * encounters an error, it is expected to assert within.
         */
        if (psci_spd_pm && psci_spd_pm->svc_suspend_finish) {
                suspend_level = psci_get_suspend_afflvl();
                assert(suspend_level != PSCI_INVALID_DATA);
                psci_spd_pm->svc_suspend_finish(suspend_level);
        }


        /* Invalidate the suspend context for the node */
        psci_set_suspend_power_state(PSCI_INVALID_DATA);

        /*
         * Generic management: Now we just need to retrieve the
         * information that we had stashed away during the suspend
         * call to set this cpu on its way.
         */
        cm_prepare_el3_exit(NON_SECURE);

        /* Clean caches before re-entering normal world */
        dcsw_op_louis(DCCSW);

        rc = PSCI_E_SUCCESS;
        return rc;
}

static unsigned int psci_afflvl1_suspend_finish(aff_map_node_t *cluster_node)
{
        unsigned int plat_state, rc = PSCI_E_SUCCESS;

        assert(cluster_node->level == MPIDR_AFFLVL1);

        /*
         * Plat. management: Perform the platform specific actions
         * as per the old state of the cluster e.g. enabling
         * coherency at the interconnect depends upon the state with
         * which this cluster was powered up. If anything goes wrong
         * then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_suspend_finish) {

                /* Get the physical state of this cluster */
                plat_state = psci_get_phys_state(cluster_node);
                rc = psci_plat_pm_ops->affinst_suspend_finish(read_mpidr_el1(),
                                                              cluster_node->level,
                                                              plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        return rc;
}


static unsigned int psci_afflvl2_suspend_finish(aff_map_node_t *system_node)
{
        unsigned int plat_state, rc = PSCI_E_SUCCESS;

        /* Cannot go beyond this affinity level */
        assert(system_node->level == MPIDR_AFFLVL2);

        /*
         * Currently, there are no architectural actions to perform
         * at the system level.
         */

        /*
         * Plat. management: Perform the platform specific actions
         * as per the old state of the system e.g. enabling
         * coherency at the interconnect depends upon the state with
         * which the system was powered up. If anything goes wrong
         * then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_suspend_finish) {

                /* Get the physical state of the system */
                plat_state = psci_get_phys_state(system_node);
                rc = psci_plat_pm_ops->affinst_suspend_finish(read_mpidr_el1(),
                                                              system_node->level,
                                                              plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        return rc;
}

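/*
 * Power-on finishers used by the common finisher routine in psci_common.c,
 * indexed by affinity level (MPIDR_AFFLVL0/1/2).
 */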
const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = {
        psci_afflvl0_suspend_finish,
        psci_afflvl1_suspend_finish,
        psci_afflvl2_suspend_finish,
};