/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <bl_common.h>
#include <arch.h>
#include <arch_helpers.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"

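/*
 * Type of the handlers invoked, one per affinity level, as part of the
 * suspend sequence. Each handler receives the affinity instance node to act
 * upon, the non-secure entrypoint and context id to restore on wakeup, and
 * the PSCI power state parameter.
 */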
typedef int (*afflvl_suspend_handler_t)(aff_map_node_t *node,
					unsigned long ns_entrypoint,
					unsigned long context_id,
					unsigned int power_state);

/*******************************************************************************
 * This function saves the power state parameter passed in the current PSCI
 * cpu_suspend call in the per-cpu data array.
 ******************************************************************************/
void psci_set_suspend_power_state(unsigned int power_state)
{
	set_cpu_data(psci_svc_cpu_data.power_state, power_state);
	flush_cpu_data(psci_svc_cpu_data.power_state);
}

/*******************************************************************************
 * This function returns the affinity level up to which the current cpu could
 * be powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
 * saved power state is invalid.
 ******************************************************************************/
int psci_get_suspend_afflvl(void)
{
	unsigned int power_state;

	power_state = get_cpu_data(psci_svc_cpu_data.power_state);

	return ((power_state == PSCI_INVALID_DATA) ?
		power_state : psci_get_pstate_afflvl(power_state));
}

/*******************************************************************************
 * This function gets the state id of the current cpu from the power state
 * parameter saved in the per-cpu data array. Returns PSCI_INVALID_DATA if the
 * saved power state is invalid.
 ******************************************************************************/
int psci_get_suspend_stateid(void)
{
	unsigned int power_state;

	power_state = get_cpu_data(psci_svc_cpu_data.power_state);

	return ((power_state == PSCI_INVALID_DATA) ?
		power_state : psci_get_pstate_id(power_state));
}

/*******************************************************************************
 * This function gets the state id of the cpu specified by the 'mpidr'
 * parameter from the power state parameter saved in the per-cpu data array.
 * Returns PSCI_INVALID_DATA if the saved power state is invalid.
 ******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
	unsigned int power_state;

	power_state = get_cpu_data_by_mpidr(mpidr,
					    psci_svc_cpu_data.power_state);

	return ((power_state == PSCI_INVALID_DATA) ?
		power_state : psci_get_pstate_id(power_state));
}

/*******************************************************************************
 * The next three functions implement a handler for each supported affinity
 * level which is called when that affinity level is about to be suspended.
 ******************************************************************************/
static int psci_afflvl0_suspend(aff_map_node_t *cpu_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	unsigned long psci_entrypoint;
	uint32_t ns_scr_el3 = read_scr_el3();
	uint32_t ns_sctlr_el1 = read_sctlr_el1();
	int rc;

	/* Sanity check to safeguard against data corruption */
	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Save the PSCI power state parameter for the core in the suspend context */
	psci_set_suspend_power_state(power_state);

	/*
	 * Generic management: Store the re-entry information for the
	 * non-secure world and allow the secure world to suspend itself.
	 */

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(power_state);

	/*
	 * Generic management: Store the re-entry information for the
	 * non-secure world.
	 */
	rc = psci_save_ns_entry(read_mpidr_el1(), ns_entrypoint, context_id,
				ns_scr_el3, ns_sctlr_el1);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* Set the secure world (EL3) re-entry point after BL1 */
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;

	/*
	 * Arch. management: Perform the necessary steps to flush all
	 * cpu caches.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);

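	/*
	 * If the platform has not registered a suspend handler there is
	 * nothing more to do at this level.
	 */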
	if (!psci_plat_pm_ops->affinst_suspend)
		return PSCI_E_SUCCESS;

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	return psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
						 cpu_node->level,
						 psci_get_phys_state(cpu_node));
}

static int psci_afflvl1_suspend(aff_map_node_t *cluster_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	unsigned int plat_state;
	unsigned long psci_entrypoint;

	/* Sanity check the cluster level */
	assert(cluster_node->level == MPIDR_AFFLVL1);

	/*
	 * Arch. management: Flush all levels of caches to PoC if the
	 * cluster is to be shut down.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);

	if (!psci_plat_pm_ops->affinst_suspend)
		return PSCI_E_SUCCESS;

	/*
	 * Plat. management: Allow the platform to do its cluster-specific
	 * bookkeeping e.g. turn off interconnect coherency, program the power
	 * controller etc. Sending the psci entrypoint is currently redundant
	 * beyond affinity level 0 but one never knows what a platform might
	 * do. Also it allows us to keep the platform handler prototype the
	 * same.
	 */
	plat_state = psci_get_phys_state(cluster_node);
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
	return psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
						 cluster_node->level,
						 plat_state);
}


static int psci_afflvl2_suspend(aff_map_node_t *system_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	unsigned int plat_state;
	unsigned long psci_entrypoint;

	/* Cannot go beyond this */
	assert(system_node->level == MPIDR_AFFLVL2);

	/*
	 * Keep the physical state of the system handy to decide what
	 * action needs to be taken.
	 */
	plat_state = psci_get_phys_state(system_node);

	/*
	 * Arch. management: Flush all levels of caches to PoC if the
	 * system is to be shut down.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);

	/*
	 * Plat. management: Allow the platform to do its bookkeeping
	 * at this affinity level.
	 */
	if (!psci_plat_pm_ops->affinst_suspend)
		return PSCI_E_SUCCESS;

	/*
	 * Sending the psci entrypoint is currently redundant
	 * beyond affinity level 0 but one never knows what a
	 * platform might do. Also it allows us to keep the
	 * platform handler prototype the same.
	 */
	plat_state = psci_get_phys_state(system_node);
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
	return psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
						 system_node->level,
						 plat_state);
}

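/* Suspend handler table, indexed by affinity level */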
static const afflvl_suspend_handler_t psci_afflvl_suspend_handlers[] = {
	psci_afflvl0_suspend,
	psci_afflvl1_suspend,
	psci_afflvl2_suspend,
};

/*******************************************************************************
 * This function takes an array of pointers to affinity instance nodes in the
 * topology tree and calls the suspend handler for the corresponding affinity
 * levels.
 ******************************************************************************/
static int psci_call_suspend_handlers(aff_map_node_t *mpidr_nodes[],
				      int start_afflvl,
				      int end_afflvl,
				      unsigned long entrypoint,
				      unsigned long context_id,
				      unsigned int power_state)
{
	int rc = PSCI_E_INVALID_PARAMS, level;
	aff_map_node_t *node;

	for (level = start_afflvl; level <= end_afflvl; level++) {
		node = mpidr_nodes[level];
		if (node == NULL)
			continue;

		/*
		 * TODO: In case of an error, should there be a way of
		 * restoring what we might have torn down at lower
		 * affinity levels?
		 */
		rc = psci_afflvl_suspend_handlers[level](node,
							 entrypoint,
							 context_id,
							 power_state);
		if (rc != PSCI_E_SUCCESS)
			break;
	}

	return rc;
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with turning the cpu off, higher affinity levels up
 * to the target affinity level will be turned off as well. It traverses
 * through all the affinity levels performing generic, architectural and
 * platform setup and state management e.g. for a cluster that is to be
 * suspended, it will call the platform specific code which will disable
 * coherency at the interconnect level if the cpu is the last in the cluster.
 * For a cpu it could mean programming the power controller etc.
 *
 * The state of all the relevant affinity levels is changed prior to calling
 * the affinity level specific handlers as their actions would depend upon the
 * state the affinity level is about to enter.
 *
 * The affinity level specific handlers are called in ascending order i.e. from
 * the lowest to the highest affinity level implemented by the platform because
 * to turn off affinity level X it is necessary to turn off affinity level
 * X - 1 first.
 ******************************************************************************/
int psci_afflvl_suspend(unsigned long entrypoint,
			unsigned long context_id,
			unsigned int power_state,
			int start_afflvl,
			int end_afflvl)
{
	int rc = PSCI_E_SUCCESS;
	mpidr_aff_map_nodes_t mpidr_nodes;
	unsigned int max_phys_off_afflvl;

	/*
	 * Collect the pointers to the nodes in the topology tree for
	 * each affinity instance in the mpidr. If this function does
	 * not return successfully then either the mpidr or the affinity
	 * levels are incorrect.
	 */
	rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
				    start_afflvl,
				    end_afflvl,
				    mpidr_nodes);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * This function acquires the lock corresponding to each affinity
	 * level so that by the time all locks are taken, the system topology
	 * is snapshot and state management can be done safely.
	 */
	psci_acquire_afflvl_locks(start_afflvl,
				  end_afflvl,
				  mpidr_nodes);

	/*
	 * This function updates the state of each affinity instance
	 * corresponding to the mpidr in the range of affinity levels
	 * specified.
	 */
	psci_do_afflvl_state_mgmt(start_afflvl,
				  end_afflvl,
				  mpidr_nodes,
				  PSCI_STATE_SUSPEND);

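	/*
	 * Find the highest affinity level that will be physically powered
	 * off by this suspend request.
	 */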
	max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
							     end_afflvl,
							     mpidr_nodes);
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);

	/* Stash the highest affinity level that will be turned off */
	psci_set_max_phys_off_afflvl(max_phys_off_afflvl);

	/* Perform generic, architecture and platform specific handling */
	rc = psci_call_suspend_handlers(mpidr_nodes,
					start_afflvl,
					end_afflvl,
					entrypoint,
					context_id,
					power_state);

	/*
	 * Invalidate the entry for the highest affinity level stashed earlier.
	 * This ensures that any reads of this variable outside the power
	 * up/down sequences return PSCI_INVALID_DATA.
	 */
	psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);

	/*
	 * Release the locks corresponding to each affinity level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_afflvl_locks(start_afflvl,
				  end_afflvl,
				  mpidr_nodes);

	return rc;
}

/*******************************************************************************
 * The following functions finish an earlier affinity suspend request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
static unsigned int psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
{
	unsigned int plat_state, state, rc;
	int32_t suspend_level;
	uint64_t counter_freq;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Ensure we have been woken up from a suspended state */
	state = psci_get_state(cpu_node);
	assert(state == PSCI_STATE_SUSPEND);

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	if (psci_plat_pm_ops->affinst_suspend_finish) {

		/* Get the physical state of this cpu */
		plat_state = get_phys_state(state);
		rc = psci_plat_pm_ops->affinst_suspend_finish(cpu_node->level,
							      plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	/*
	 * Arch. management: Enable the data cache, manage stack memory and
	 * restore the stashed EL3 architectural context from the 'cpu_context'
	 * structure for this cpu.
	 */
	psci_do_pwrup_cache_maintenance();

	/* Re-initialise the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend) {
		suspend_level = psci_get_suspend_afflvl();
		assert(suspend_level != PSCI_INVALID_DATA);
		psci_spd_pm->svc_suspend_finish(suspend_level);
	}

	/* Invalidate the suspend context for the node */
	psci_set_suspend_power_state(PSCI_INVALID_DATA);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);

	rc = PSCI_E_SUCCESS;
	return rc;
}

static unsigned int psci_afflvl1_suspend_finish(aff_map_node_t *cluster_node)
{
	unsigned int plat_state, rc = PSCI_E_SUCCESS;

	assert(cluster_node->level == MPIDR_AFFLVL1);

	/*
	 * Plat. management: Perform the platform specific actions
	 * as per the old state of the cluster e.g. enabling
	 * coherency at the interconnect depends upon the state with
	 * which this cluster was powered up. If anything goes wrong
	 * then assert as there is no way to recover from this
	 * situation.
	 */
	if (psci_plat_pm_ops->affinst_suspend_finish) {

		/* Get the physical state of this cluster */
		plat_state = psci_get_phys_state(cluster_node);
		rc = psci_plat_pm_ops->affinst_suspend_finish(cluster_node->level,
							      plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	return rc;
}


static unsigned int psci_afflvl2_suspend_finish(aff_map_node_t *system_node)
{
	unsigned int plat_state, rc = PSCI_E_SUCCESS;

	/* Cannot go beyond this affinity level */
	assert(system_node->level == MPIDR_AFFLVL2);

	/*
	 * Currently, there are no architectural actions to perform
	 * at the system level.
	 */

	/*
	 * Plat. management: Perform the platform specific actions
	 * as per the old state of the system e.g. enabling
	 * coherency at the interconnect depends upon the state with
	 * which the system was powered up. If anything goes wrong
	 * then assert as there is no way to recover from this
	 * situation.
	 */
	if (psci_plat_pm_ops->affinst_suspend_finish) {

		/* Get the physical state of the system */
		plat_state = psci_get_phys_state(system_node);
		rc = psci_plat_pm_ops->affinst_suspend_finish(system_node->level,
							      plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	return rc;
}

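/* Finisher table, indexed by affinity level, used by the common finisher in psci_common.c */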
const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = {
	psci_afflvl0_suspend_finish,
	psci_afflvl1_suspend_finish,
	psci_afflvl2_suspend_finish,
};