blob: 0dbd0e0608772cac822406f8dcd86fa87c36c12b [file] [log] [blame]
Soby Mathew991d42c2015-06-29 16:30:12 +01001/*
2 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch.h>
32#include <arch_helpers.h>
33#include <assert.h>
34#include <bl_common.h>
35#include <bl31.h>
36#include <debug.h>
37#include <context_mgmt.h>
38#include <platform.h>
39#include <runtime_svc.h>
40#include <stddef.h>
41#include "psci_private.h"
42
/* Signature shared by the per-affinity-level power-on handlers below */
typedef int (*afflvl_on_handler_t)(unsigned long target_cpu,
				   aff_map_node_t *node);
45
46/*******************************************************************************
47 * This function checks whether a cpu which has been requested to be turned on
48 * is OFF to begin with.
49 ******************************************************************************/
50static int cpu_on_validate_state(unsigned int psci_state)
51{
52 if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
53 return PSCI_E_ALREADY_ON;
54
55 if (psci_state == PSCI_STATE_ON_PENDING)
56 return PSCI_E_ON_PENDING;
57
58 assert(psci_state == PSCI_STATE_OFF);
59 return PSCI_E_SUCCESS;
60}
61
62/*******************************************************************************
63 * Handler routine to turn a cpu on. It takes care of any generic, architectural
64 * or platform specific setup required.
65 * TODO: Split this code across separate handlers for each type of setup?
66 ******************************************************************************/
67static int psci_afflvl0_on(unsigned long target_cpu,
68 aff_map_node_t *cpu_node)
69{
70 unsigned long psci_entrypoint;
71
72 /* Sanity check to safeguard against data corruption */
73 assert(cpu_node->level == MPIDR_AFFLVL0);
74
75 /* Set the secure world (EL3) re-entry point after BL1 */
76 psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
77
78 /*
79 * Plat. management: Give the platform the current state
80 * of the target cpu to allow it to perform the necessary
81 * steps to power on.
82 */
83 return psci_plat_pm_ops->affinst_on(target_cpu,
84 psci_entrypoint,
85 cpu_node->level,
86 psci_get_phys_state(cpu_node));
87}
88
89/*******************************************************************************
90 * Handler routine to turn a cluster on. It takes care or any generic, arch.
91 * or platform specific setup required.
92 * TODO: Split this code across separate handlers for each type of setup?
93 ******************************************************************************/
94static int psci_afflvl1_on(unsigned long target_cpu,
95 aff_map_node_t *cluster_node)
96{
97 unsigned long psci_entrypoint;
98
99 assert(cluster_node->level == MPIDR_AFFLVL1);
100
101 /*
102 * There is no generic and arch. specific cluster
103 * management required
104 */
105
106 /* State management: Is not required while turning a cluster on */
107
108 /*
109 * Plat. management: Give the platform the current state
110 * of the target cpu to allow it to perform the necessary
111 * steps to power on.
112 */
113 psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
114 return psci_plat_pm_ops->affinst_on(target_cpu,
115 psci_entrypoint,
116 cluster_node->level,
117 psci_get_phys_state(cluster_node));
118}
119
120/*******************************************************************************
121 * Handler routine to turn a cluster of clusters on. It takes care or any
122 * generic, arch. or platform specific setup required.
123 * TODO: Split this code across separate handlers for each type of setup?
124 ******************************************************************************/
125static int psci_afflvl2_on(unsigned long target_cpu,
126 aff_map_node_t *system_node)
127{
128 unsigned long psci_entrypoint;
129
130 /* Cannot go beyond affinity level 2 in this psci imp. */
131 assert(system_node->level == MPIDR_AFFLVL2);
132
133 /*
134 * There is no generic and arch. specific system management
135 * required
136 */
137
138 /* State management: Is not required while turning a system on */
139
140 /*
141 * Plat. management: Give the platform the current state
142 * of the target cpu to allow it to perform the necessary
143 * steps to power on.
144 */
145 psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
146 return psci_plat_pm_ops->affinst_on(target_cpu,
147 psci_entrypoint,
148 system_node->level,
149 psci_get_phys_state(system_node));
150}
151
/*
 * Private table making these handlers accessible by indexing with the
 * affinity level: entry [MPIDR_AFFLVLn] must be the level-n handler.
 */
static const afflvl_on_handler_t psci_afflvl_on_handlers[] = {
	psci_afflvl0_on,
	psci_afflvl1_on,
	psci_afflvl2_on,
};
158
159/*******************************************************************************
160 * This function takes an array of pointers to affinity instance nodes in the
161 * topology tree and calls the on handler for the corresponding affinity
162 * levels
163 ******************************************************************************/
164static int psci_call_on_handlers(aff_map_node_t *target_cpu_nodes[],
165 int start_afflvl,
166 int end_afflvl,
167 unsigned long target_cpu)
168{
169 int rc = PSCI_E_INVALID_PARAMS, level;
170 aff_map_node_t *node;
171
172 for (level = end_afflvl; level >= start_afflvl; level--) {
173 node = target_cpu_nodes[level];
174 if (node == NULL)
175 continue;
176
177 /*
178 * TODO: In case of an error should there be a way
179 * of undoing what we might have setup at higher
180 * affinity levels.
181 */
182 rc = psci_afflvl_on_handlers[level](target_cpu,
183 node);
184 if (rc != PSCI_E_SUCCESS)
185 break;
186 }
187
188 return rc;
189}
190
/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It traverses through all the affinity levels performing generic,
 * architectural, platform setup and state management e.g. for a cpu that is
 * to be powered on, it will ensure that enough information is stashed for it
 * to resume execution in the non-secure security state.
 *
 * The state of all the relevant affinity levels is changed after calling the
 * affinity level specific handlers as their actions would depend upon the state
 * the affinity level is currently in.
 *
 * The affinity level specific handlers are called in descending order i.e. from
 * the highest to the lowest affinity level implemented by the platform because
 * to turn on affinity level X it is necessary to turn on affinity level X + 1
 * first.
 *
 * target_cpu          - mpidr of the cpu to power on
 * ep                  - non-secure entry point information for the target cpu
 * start_afflvl        - lowest affinity level to operate on (usually AFFLVL0)
 * end_afflvl          - highest affinity level to operate on
 *
 * Returns PSCI_E_SUCCESS on success or a PSCI error code otherwise.
 ******************************************************************************/
int psci_afflvl_on(unsigned long target_cpu,
		   entry_point_info_t *ep,
		   int start_afflvl,
		   int end_afflvl)
{
	int rc;
	mpidr_aff_map_nodes_t target_cpu_nodes;

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->affinst_on &&
	       psci_plat_pm_ops->affinst_on_finish);

	/*
	 * Collect the pointers to the nodes in the topology tree for
	 * each affinity instance in the mpidr. If this function does
	 * not return successfully then either the mpidr or the affinity
	 * levels are incorrect.
	 */
	rc = psci_get_aff_map_nodes(target_cpu,
				    start_afflvl,
				    end_afflvl,
				    target_cpu_nodes);
	assert(rc == PSCI_E_SUCCESS);

	/*
	 * This function acquires the lock corresponding to each affinity
	 * level so that by the time all locks are taken, the system topology
	 * is snapshot and state management can be done safely.
	 */
	psci_acquire_afflvl_locks(start_afflvl,
				  end_afflvl,
				  target_cpu_nodes);

	/*
	 * Generic management: Ensure that the cpu is off to be
	 * turned on. The locks taken above guarantee the state cannot
	 * change underneath us.
	 */
	rc = cpu_on_validate_state(psci_get_state(
		    target_cpu_nodes[MPIDR_AFFLVL0]));
	if (rc != PSCI_E_SUCCESS)
		goto exit;

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it's expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on)
		psci_spd_pm->svc_on(target_cpu);

	/*
	 * This function updates the state of each affinity instance
	 * corresponding to the mpidr in the range of affinity levels
	 * specified. Done before the handlers run so they observe the
	 * pending power-on.
	 */
	psci_do_afflvl_state_mgmt(start_afflvl,
				  end_afflvl,
				  target_cpu_nodes,
				  PSCI_STATE_ON_PENDING);

	/* Perform generic, architecture and platform specific handling. */
	rc = psci_call_on_handlers(target_cpu_nodes,
				   start_afflvl,
				   end_afflvl,
				   target_cpu);

	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

	if (rc == PSCI_E_SUCCESS)
		/* Store the re-entry information for the non-secure world. */
		cm_init_context(target_cpu, ep);
	else
		/* Restore the ON_PENDING state set above on error. */
		psci_do_afflvl_state_mgmt(start_afflvl,
					  end_afflvl,
					  target_cpu_nodes,
					  PSCI_STATE_OFF);
exit:
	/*
	 * This loop releases the lock corresponding to each affinity level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_afflvl_locks(start_afflvl,
				  end_afflvl,
				  target_cpu_nodes);

	return rc;
}
298
/*******************************************************************************
 * The following functions finish an earlier affinity power on request. They
 * are called by the common finisher routine in psci_common.c. This one runs
 * on the freshly powered-on cpu itself; the statement order below (platform
 * finish -> cache/stack setup -> arch setup -> SPD hook -> EL3 exit prep) is
 * deliberate and must not be rearranged.
 ******************************************************************************/
static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
{
	unsigned int plat_state, state;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Ensure we have been explicitly woken up by another cpu */
	state = psci_get_state(cpu_node);
	assert(state == PSCI_STATE_ON_PENDING);

	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */

	/* Get the physical state of this cpu */
	plat_state = get_phys_state(state);
	psci_plat_pm_ops->affinst_on_finish(cpu_node->level,
					    plat_state);

	/*
	 * Arch. management: Enable data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch.initialization
	 * to run in the non-secure address space.
	 */
	bl31_arch_setup();

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it's expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
		psci_spd_pm->svc_on_finish(0);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);
}
355
356static void psci_afflvl1_on_finish(aff_map_node_t *cluster_node)
357{
358 unsigned int plat_state;
359
360 assert(cluster_node->level == MPIDR_AFFLVL1);
361
362 /*
363 * Plat. management: Perform the platform specific actions
364 * as per the old state of the cluster e.g. enabling
365 * coherency at the interconnect depends upon the state with
366 * which this cluster was powered up. If anything goes wrong
367 * then assert as there is no way to recover from this
368 * situation.
369 */
370 plat_state = psci_get_phys_state(cluster_node);
371 psci_plat_pm_ops->affinst_on_finish(cluster_node->level,
372 plat_state);
373}
374
375
376static void psci_afflvl2_on_finish(aff_map_node_t *system_node)
377{
378 unsigned int plat_state;
379
380 /* Cannot go beyond this affinity level */
381 assert(system_node->level == MPIDR_AFFLVL2);
382
383 /*
384 * Currently, there are no architectural actions to perform
385 * at the system level.
386 */
387
388 /*
389 * Plat. management: Perform the platform specific actions
390 * as per the old state of the cluster e.g. enabling
391 * coherency at the interconnect depends upon the state with
392 * which this cluster was powered up. If anything goes wrong
393 * then assert as there is no way to recover from this
394 * situation.
395 */
396 plat_state = psci_get_phys_state(system_node);
397 psci_plat_pm_ops->affinst_on_finish(system_node->level,
398 plat_state);
399}
400
/*
 * Table of power-on finishers indexed by affinity level: entry [MPIDR_AFFLVLn]
 * must be the level-n finisher. Consumed by the common finisher routine in
 * psci_common.c.
 */
const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = {
	psci_afflvl0_on_finish,
	psci_afflvl1_on_finish,
	psci_afflvl2_on_finish,
};