blob: 0dbd0e0608772cac822406f8dcd86fa87c36c12b [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dan Handleye83b0ca2014-01-14 18:17:09 +00002 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
Dan Handley2bd4ef22014-04-09 13:14:54 +010031#include <arch.h>
32#include <arch_helpers.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010033#include <assert.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010034#include <bl_common.h>
Dan Handleybcd60ba2014-04-17 18:53:42 +010035#include <bl31.h>
Soby Mathew74e52a72014-10-02 16:56:51 +010036#include <debug.h>
Achin Gupta0a9f7472014-02-09 17:48:12 +000037#include <context_mgmt.h>
Dan Handleyed6ff952014-05-14 17:44:19 +010038#include <platform.h>
Dan Handleybcd60ba2014-04-17 18:53:42 +010039#include <runtime_svc.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010040#include <stddef.h>
Dan Handley714a0d22014-04-09 13:13:04 +010041#include "psci_private.h"
Achin Gupta4f6ad662013-10-25 09:08:21 +010042
/*
 * Prototype of the per-affinity-level CPU_ON handlers defined below and
 * dispatched through the psci_afflvl_on_handlers[] table.
 */
typedef int (*afflvl_on_handler_t)(unsigned long target_cpu,
				   aff_map_node_t *node);
Achin Gupta4f6ad662013-10-25 09:08:21 +010045
46/*******************************************************************************
47 * This function checks whether a cpu which has been requested to be turned on
48 * is OFF to begin with.
49 ******************************************************************************/
Soby Mathew5f2c1b32015-01-12 13:01:31 +000050static int cpu_on_validate_state(unsigned int psci_state)
Achin Gupta4f6ad662013-10-25 09:08:21 +010051{
Achin Gupta4f6ad662013-10-25 09:08:21 +010052 if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
53 return PSCI_E_ALREADY_ON;
54
55 if (psci_state == PSCI_STATE_ON_PENDING)
56 return PSCI_E_ON_PENDING;
57
58 assert(psci_state == PSCI_STATE_OFF);
59 return PSCI_E_SUCCESS;
60}
61
62/*******************************************************************************
63 * Handler routine to turn a cpu on. It takes care of any generic, architectural
64 * or platform specific setup required.
65 * TODO: Split this code across separate handlers for each type of setup?
66 ******************************************************************************/
67static int psci_afflvl0_on(unsigned long target_cpu,
Soby Mathew8595b872015-01-06 15:36:38 +000068 aff_map_node_t *cpu_node)
Achin Gupta4f6ad662013-10-25 09:08:21 +010069{
Achin Gupta4f6ad662013-10-25 09:08:21 +010070 unsigned long psci_entrypoint;
Achin Gupta4f6ad662013-10-25 09:08:21 +010071
72 /* Sanity check to safeguard against data corruption */
73 assert(cpu_node->level == MPIDR_AFFLVL0);
74
Achin Gupta4f6ad662013-10-25 09:08:21 +010075 /* Set the secure world (EL3) re-entry point after BL1 */
76 psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
77
78 /*
79 * Plat. management: Give the platform the current state
80 * of the target cpu to allow it to perform the necessary
81 * steps to power on.
82 */
Achin Gupta56bcdc22014-07-28 00:15:23 +010083 return psci_plat_pm_ops->affinst_on(target_cpu,
84 psci_entrypoint,
Achin Gupta56bcdc22014-07-28 00:15:23 +010085 cpu_node->level,
86 psci_get_phys_state(cpu_node));
Achin Gupta4f6ad662013-10-25 09:08:21 +010087}
88
89/*******************************************************************************
90 * Handler routine to turn a cluster on. It takes care or any generic, arch.
91 * or platform specific setup required.
92 * TODO: Split this code across separate handlers for each type of setup?
93 ******************************************************************************/
94static int psci_afflvl1_on(unsigned long target_cpu,
Soby Mathew8595b872015-01-06 15:36:38 +000095 aff_map_node_t *cluster_node)
Achin Gupta4f6ad662013-10-25 09:08:21 +010096{
Achin Gupta4f6ad662013-10-25 09:08:21 +010097 unsigned long psci_entrypoint;
98
99 assert(cluster_node->level == MPIDR_AFFLVL1);
100
101 /*
102 * There is no generic and arch. specific cluster
103 * management required
104 */
105
Achin Gupta75f73672013-12-05 16:33:10 +0000106 /* State management: Is not required while turning a cluster on */
107
Achin Gupta4f6ad662013-10-25 09:08:21 +0100108 /*
109 * Plat. management: Give the platform the current state
110 * of the target cpu to allow it to perform the necessary
111 * steps to power on.
112 */
Achin Gupta56bcdc22014-07-28 00:15:23 +0100113 psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
114 return psci_plat_pm_ops->affinst_on(target_cpu,
115 psci_entrypoint,
Achin Gupta56bcdc22014-07-28 00:15:23 +0100116 cluster_node->level,
117 psci_get_phys_state(cluster_node));
Achin Gupta4f6ad662013-10-25 09:08:21 +0100118}
119
120/*******************************************************************************
121 * Handler routine to turn a cluster of clusters on. It takes care or any
122 * generic, arch. or platform specific setup required.
123 * TODO: Split this code across separate handlers for each type of setup?
124 ******************************************************************************/
125static int psci_afflvl2_on(unsigned long target_cpu,
Soby Mathew8595b872015-01-06 15:36:38 +0000126 aff_map_node_t *system_node)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100127{
Achin Gupta4f6ad662013-10-25 09:08:21 +0100128 unsigned long psci_entrypoint;
129
130 /* Cannot go beyond affinity level 2 in this psci imp. */
131 assert(system_node->level == MPIDR_AFFLVL2);
132
133 /*
134 * There is no generic and arch. specific system management
135 * required
136 */
137
Achin Gupta75f73672013-12-05 16:33:10 +0000138 /* State management: Is not required while turning a system on */
139
Achin Gupta4f6ad662013-10-25 09:08:21 +0100140 /*
141 * Plat. management: Give the platform the current state
142 * of the target cpu to allow it to perform the necessary
143 * steps to power on.
144 */
Achin Gupta56bcdc22014-07-28 00:15:23 +0100145 psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
146 return psci_plat_pm_ops->affinst_on(target_cpu,
147 psci_entrypoint,
Achin Gupta56bcdc22014-07-28 00:15:23 +0100148 system_node->level,
149 psci_get_phys_state(system_node));
Achin Gupta4f6ad662013-10-25 09:08:21 +0100150}
151
/*
 * Private table making the handlers above accessible by indexing with the
 * affinity level (MPIDR_AFFLVL0..MPIDR_AFFLVL2).
 */
static const afflvl_on_handler_t psci_afflvl_on_handlers[] = {
	psci_afflvl0_on,
	psci_afflvl1_on,
	psci_afflvl2_on,
};
158
159/*******************************************************************************
Achin Gupta0959db52013-12-02 17:33:04 +0000160 * This function takes an array of pointers to affinity instance nodes in the
161 * topology tree and calls the on handler for the corresponding affinity
162 * levels
163 ******************************************************************************/
Achin Gupta56bcdc22014-07-28 00:15:23 +0100164static int psci_call_on_handlers(aff_map_node_t *target_cpu_nodes[],
Achin Gupta0959db52013-12-02 17:33:04 +0000165 int start_afflvl,
166 int end_afflvl,
Soby Mathew8595b872015-01-06 15:36:38 +0000167 unsigned long target_cpu)
Achin Gupta0959db52013-12-02 17:33:04 +0000168{
169 int rc = PSCI_E_INVALID_PARAMS, level;
Dan Handleye2712bc2014-04-10 15:37:22 +0100170 aff_map_node_t *node;
Achin Gupta0959db52013-12-02 17:33:04 +0000171
172 for (level = end_afflvl; level >= start_afflvl; level--) {
173 node = target_cpu_nodes[level];
174 if (node == NULL)
175 continue;
176
177 /*
178 * TODO: In case of an error should there be a way
179 * of undoing what we might have setup at higher
180 * affinity levels.
181 */
182 rc = psci_afflvl_on_handlers[level](target_cpu,
Soby Mathew8595b872015-01-06 15:36:38 +0000183 node);
Achin Gupta0959db52013-12-02 17:33:04 +0000184 if (rc != PSCI_E_SUCCESS)
185 break;
186 }
187
188 return rc;
189}
190
/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It traverses through all the affinity levels performing generic,
 * architectural, platform setup and state management e.g. for a cpu that is
 * to be powered on, it will ensure that enough information is stashed for it
 * to resume execution in the non-secure security state.
 *
 * The state of all the relevant affinity levels is changed after calling the
 * affinity level specific handlers as their actions would depend upon the state
 * the affinity level is currently in.
 *
 * The affinity level specific handlers are called in descending order i.e. from
 * the highest to the lowest affinity level implemented by the platform because
 * to turn on affinity level X it is necessary to turn on affinity level X + 1
 * first.
 *
 * Returns PSCI_E_SUCCESS on success or a PSCI error code (e.g.
 * PSCI_E_ALREADY_ON, PSCI_E_ON_PENDING, PSCI_E_INTERN_FAIL) on failure.
 ******************************************************************************/
int psci_afflvl_on(unsigned long target_cpu,
		   entry_point_info_t *ep,
		   int start_afflvl,
		   int end_afflvl)
{
	int rc;
	mpidr_aff_map_nodes_t target_cpu_nodes;

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->affinst_on &&
			psci_plat_pm_ops->affinst_on_finish);

	/*
	 * Collect the pointers to the nodes in the topology tree for
	 * each affinity instance in the mpidr. If this function does
	 * not return successfully then either the mpidr or the affinity
	 * levels are incorrect.
	 */
	rc = psci_get_aff_map_nodes(target_cpu,
				    start_afflvl,
				    end_afflvl,
				    target_cpu_nodes);
	assert(rc == PSCI_E_SUCCESS);

	/*
	 * This function acquires the lock corresponding to each affinity
	 * level so that by the time all locks are taken, the system topology
	 * is snapshot and state management can be done safely.
	 */
	psci_acquire_afflvl_locks(start_afflvl,
				  end_afflvl,
				  target_cpu_nodes);

	/*
	 * Generic management: Ensure that the cpu is off to be
	 * turned on.
	 */
	rc = cpu_on_validate_state(psci_get_state(
		    target_cpu_nodes[MPIDR_AFFLVL0]));
	if (rc != PSCI_E_SUCCESS)
		goto exit;

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it's expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on)
		psci_spd_pm->svc_on(target_cpu);

	/*
	 * Mark every affinity instance corresponding to the mpidr, in the
	 * range of affinity levels specified, as ON_PENDING before invoking
	 * the handlers (they consult this state).
	 */
	psci_do_afflvl_state_mgmt(start_afflvl,
				  end_afflvl,
				  target_cpu_nodes,
				  PSCI_STATE_ON_PENDING);

	/* Perform generic, architecture and platform specific handling. */
	rc = psci_call_on_handlers(target_cpu_nodes,
				   start_afflvl,
				   end_afflvl,
				   target_cpu);

	/* The handlers may only succeed or report an internal failure */
	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

	if (rc == PSCI_E_SUCCESS)
		/* Store the re-entry information for the non-secure world. */
		cm_init_context(target_cpu, ep);
	else
		/* Roll the affinity instances back to OFF on error. */
		psci_do_afflvl_state_mgmt(start_afflvl,
					  end_afflvl,
					  target_cpu_nodes,
					  PSCI_STATE_OFF);
exit:
	/*
	 * This loop releases the lock corresponding to each affinity level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_afflvl_locks(start_afflvl,
				  end_afflvl,
				  target_cpu_nodes);

	return rc;
}
298
/*******************************************************************************
 * The following functions finish an earlier affinity power on request. They
 * are called by the common finisher routine in psci_common.c.
 *
 * NOTE(review): the sequence below is strictly ordered — platform finish,
 * then cache/arch setup, then the SPD hook, then EL3 exit preparation —
 * do not reorder.
 ******************************************************************************/
static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
{
	unsigned int plat_state, state;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Ensure we have been explicitly woken up by another cpu */
	state = psci_get_state(cpu_node);
	assert(state == PSCI_STATE_ON_PENDING);

	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */

	/* Get the physical state of this cpu */
	plat_state = get_phys_state(state);
	psci_plat_pm_ops->affinst_on_finish(cpu_node->level,
					    plat_state);

	/*
	 * Arch. management: Enable data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch.initialization
	 * to run in the non-secure address space.
	 */
	bl31_arch_setup();

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it's expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
		psci_spd_pm->svc_on_finish(0);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);
}
355
Soby Mathew74e52a72014-10-02 16:56:51 +0100356static void psci_afflvl1_on_finish(aff_map_node_t *cluster_node)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100357{
Achin Gupta56bcdc22014-07-28 00:15:23 +0100358 unsigned int plat_state;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100359
360 assert(cluster_node->level == MPIDR_AFFLVL1);
361
362 /*
363 * Plat. management: Perform the platform specific actions
364 * as per the old state of the cluster e.g. enabling
365 * coherency at the interconnect depends upon the state with
366 * which this cluster was powered up. If anything goes wrong
367 * then assert as there is no way to recover from this
368 * situation.
369 */
Achin Gupta56bcdc22014-07-28 00:15:23 +0100370 plat_state = psci_get_phys_state(cluster_node);
Soby Mathew74e52a72014-10-02 16:56:51 +0100371 psci_plat_pm_ops->affinst_on_finish(cluster_node->level,
Achin Gupta56bcdc22014-07-28 00:15:23 +0100372 plat_state);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100373}
374
375
Soby Mathew74e52a72014-10-02 16:56:51 +0100376static void psci_afflvl2_on_finish(aff_map_node_t *system_node)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100377{
Achin Gupta56bcdc22014-07-28 00:15:23 +0100378 unsigned int plat_state;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100379
380 /* Cannot go beyond this affinity level */
381 assert(system_node->level == MPIDR_AFFLVL2);
382
383 /*
384 * Currently, there are no architectural actions to perform
385 * at the system level.
386 */
387
388 /*
389 * Plat. management: Perform the platform specific actions
390 * as per the old state of the cluster e.g. enabling
391 * coherency at the interconnect depends upon the state with
392 * which this cluster was powered up. If anything goes wrong
393 * then assert as there is no way to recover from this
394 * situation.
395 */
Achin Gupta56bcdc22014-07-28 00:15:23 +0100396 plat_state = psci_get_phys_state(system_node);
Soby Mathew74e52a72014-10-02 16:56:51 +0100397 psci_plat_pm_ops->affinst_on_finish(system_node->level,
Achin Gupta56bcdc22014-07-28 00:15:23 +0100398 plat_state);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100399}
400
/*
 * Table of power-on finishers, indexed by affinity level; consumed by the
 * common finisher routine in psci_common.c.
 */
const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = {
	psci_afflvl0_on_finish,
	psci_afflvl1_on_finish,
	psci_afflvl2_on_finish,
};