/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <string.h>
#include "psci_private.h"

/*
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization
 */
const spd_pm_ops_t *psci_spd_pm;

/*******************************************************************************
 * Grand array that holds the platform's topology information for state
 * management of affinity instances. Each node (aff_map_node) in the array
 * corresponds to an affinity instance e.g. cluster, cpu within an mpidr
 ******************************************************************************/
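/*
 * When USE_COHERENT_MEM is enabled, this array is expected to be placed in
 * the 'tzfw_coherent_mem' section so that it remains usable while the data
 * caches are disabled; otherwise it is flushed explicitly where required.
 */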
aff_map_node_t psci_aff_map[PSCI_NUM_AFFS]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
;

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
const plat_pm_ops_t *psci_plat_pm_ops;

/*******************************************************************************
 * Check that the maximum affinity level supported by the platform makes sense
 ******************************************************************************/
CASSERT(PLATFORM_MAX_AFFLVL <= MPIDR_MAX_AFFLVL && \
	PLATFORM_MAX_AFFLVL >= MPIDR_AFFLVL0, \
	assert_platform_max_afflvl_check);

/*******************************************************************************
 * This function is passed an array of pointers to affinity level nodes in the
 * topology tree for an mpidr. It iterates through the nodes to find the highest
 * affinity level which is marked as physically powered off.
 ******************************************************************************/
uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl,
				       uint32_t end_afflvl,
				       aff_map_node_t *mpidr_nodes[])
{
	uint32_t max_afflvl = PSCI_INVALID_DATA;

	for (; start_afflvl <= end_afflvl; start_afflvl++) {
		if (mpidr_nodes[start_afflvl] == NULL)
			continue;

		if (psci_get_phys_state(mpidr_nodes[start_afflvl]) ==
		    PSCI_STATE_OFF)
			max_afflvl = start_afflvl;
	}

	return max_afflvl;
}

/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
	unsigned long mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
	unsigned int i;

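	/*
	 * Walk every cpu level (affinity level 0) node in the topology and
	 * check that no cpu other than the current one is still marked ON.
	 */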
	for (i = psci_aff_limits[MPIDR_AFFLVL0].min;
			i <= psci_aff_limits[MPIDR_AFFLVL0].max; i++) {

		assert(psci_aff_map[i].level == MPIDR_AFFLVL0);

		if (!(psci_aff_map[i].state & PSCI_AFF_PRESENT))
			continue;

		if (psci_aff_map[i].mpidr == mpidr) {
			assert(psci_get_state(&psci_aff_map[i])
					== PSCI_STATE_ON);
			continue;
		}

		if (psci_get_state(&psci_aff_map[i]) != PSCI_STATE_OFF)
			return 0;
	}

	return 1;
}

/*******************************************************************************
 * This function saves the highest affinity level which is in OFF state. The
 * affinity instance with which the level is associated is determined by the
 * caller.
 ******************************************************************************/
void psci_set_max_phys_off_afflvl(uint32_t afflvl)
{
	set_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl, afflvl);

	/*
	 * Ensure that the saved value is flushed to main memory and any
	 * speculatively pre-fetched stale copies are invalidated from the
	 * caches of other cpus in the same coherency domain. This ensures that
	 * the value can be safely read irrespective of the state of the data
	 * cache.
	 */
	flush_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
}

/*******************************************************************************
 * This function reads the saved highest affinity level which is in OFF
 * state. The affinity instance with which the level is associated is determined
 * by the caller.
 ******************************************************************************/
uint32_t psci_get_max_phys_off_afflvl(void)
{
	/*
	 * Ensure that the last update of this value in this cpu's cache is
	 * flushed to main memory and any speculatively pre-fetched stale copies
	 * are invalidated from the caches of other cpus in the same coherency
	 * domain. This ensures that the value is always read from the main
	 * memory when it was written before the data cache was enabled.
	 */
	flush_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
	return get_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
}

/*******************************************************************************
 * Routine to return the maximum affinity level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
int get_power_on_target_afflvl(void)
{
	int afflvl;

#if DEBUG
	unsigned int state;
	aff_map_node_t *node;

	/* Retrieve our node from the topology tree */
	node = psci_get_aff_map_node(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
				     MPIDR_AFFLVL0);
	assert(node);

	/*
	 * Sanity check the state of the cpu. It should be either suspend or "on
	 * pending"
	 */
	state = psci_get_state(node);
	assert(state == PSCI_STATE_SUSPEND || state == PSCI_STATE_ON_PENDING);
#endif

	/*
	 * Assume that this cpu was suspended and retrieve its target affinity
	 * level. If it is invalid then it could only have been turned off
	 * earlier. PLATFORM_MAX_AFFLVL will be the highest affinity level a
	 * cpu can be turned off to.
	 */
	afflvl = psci_get_suspend_afflvl();
	if (afflvl == PSCI_INVALID_DATA)
		afflvl = PLATFORM_MAX_AFFLVL;
	return afflvl;
}

/*******************************************************************************
 * Simple routine to set the id of an affinity instance at a given level in the
 * mpidr.
 ******************************************************************************/
unsigned long mpidr_set_aff_inst(unsigned long mpidr,
				 unsigned char aff_inst,
				 int aff_lvl)
{
	unsigned long aff_shift;

	assert(aff_lvl <= MPIDR_AFFLVL3);

	/*
	 * Decide the number of bits to shift by depending upon
	 * the affinity level
	 */
	aff_shift = get_afflvl_shift(aff_lvl);

	/* Clear the existing affinity instance & set the new one */
	mpidr &= ~(((unsigned long)MPIDR_AFFLVL_MASK) << aff_shift);
	mpidr |= ((unsigned long)aff_inst) << aff_shift;

	return mpidr;
}

/*******************************************************************************
 * This function sanity checks a range of affinity levels.
 ******************************************************************************/
int psci_check_afflvl_range(int start_afflvl, int end_afflvl)
{
	/* Sanity check the parameters passed */
	if (end_afflvl > PLATFORM_MAX_AFFLVL)
		return PSCI_E_INVALID_PARAMS;

	if (start_afflvl < MPIDR_AFFLVL0)
		return PSCI_E_INVALID_PARAMS;

	if (end_afflvl < start_afflvl)
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * This function is passed an array of pointers to affinity level nodes in the
 * topology tree for an mpidr and the state which each node should transition
 * to. It updates the state of each node between the specified affinity levels.
 ******************************************************************************/
void psci_do_afflvl_state_mgmt(uint32_t start_afflvl,
			       uint32_t end_afflvl,
			       aff_map_node_t *mpidr_nodes[],
			       uint32_t state)
{
	uint32_t level;

	for (level = start_afflvl; level <= end_afflvl; level++) {
		if (mpidr_nodes[level] == NULL)
			continue;
		psci_set_state(mpidr_nodes[level], state);
	}
}

/*******************************************************************************
 * This function is passed an array of pointers to affinity level nodes in the
 * topology tree for an mpidr. It picks up locks for each affinity level bottom
 * up in the range specified.
 ******************************************************************************/
void psci_acquire_afflvl_locks(int start_afflvl,
			       int end_afflvl,
			       aff_map_node_t *mpidr_nodes[])
{
	int level;

	for (level = start_afflvl; level <= end_afflvl; level++) {
		if (mpidr_nodes[level] == NULL)
			continue;

		psci_lock_get(mpidr_nodes[level]);
	}
}

/*******************************************************************************
 * This function is passed an array of pointers to affinity level nodes in the
 * topology tree for an mpidr. It releases the lock for each affinity level top
 * down in the range specified.
 ******************************************************************************/
void psci_release_afflvl_locks(int start_afflvl,
			       int end_afflvl,
			       aff_map_node_t *mpidr_nodes[])
{
	int level;

	for (level = end_afflvl; level >= start_afflvl; level--) {
		if (mpidr_nodes[level] == NULL)
			continue;

		psci_lock_release(mpidr_nodes[level]);
	}
}

/*******************************************************************************
 * Simple routine to determine whether an affinity instance at a given level
 * in an mpidr exists or not.
 ******************************************************************************/
int psci_validate_mpidr(unsigned long mpidr, int level)
{
	aff_map_node_t *node;

	node = psci_get_aff_map_node(mpidr, level);
	if (node && (node->state & PSCI_AFF_PRESENT))
		return PSCI_E_SUCCESS;
	else
		return PSCI_E_INVALID_PARAMS;
}

/*******************************************************************************
 * This function determines the full entrypoint information for the requested
 * PSCI entrypoint on power on/resume and returns it.
 ******************************************************************************/
int psci_get_ns_ep_info(entry_point_info_t *ep,
			uint64_t entrypoint, uint64_t context_id)
{
	uint32_t ep_attr, mode, sctlr, daif, ee;
	uint32_t ns_scr_el3 = read_scr_el3();
	uint32_t ns_sctlr_el1 = read_sctlr_el1();

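	/*
	 * Read the SCTLR of the exception level the cpu will enter in the
	 * normal world: EL2 when the normal world is expected to use it
	 * (SCR_EL3.HCE set), EL1 otherwise. Its EE bit gives the endianness
	 * of the entry point.
	 */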
	sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	memset(&ep->args, 0, sizeof(ep->args));
	ep->args.arg0 = context_id;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	if (ns_scr_el3 & SCR_RW_BIT) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if (entrypoint & 0x1)
			return PSCI_E_INVALID_PARAMS;

		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;

		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	} else {

		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

		/*
		 * TODO: If HYP mode is not implemented, choose the
		 * asynchronous exception bits according to the values of the
		 * SCR.{AW, FW} bits
		 */
		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * This function takes a pointer to an affinity node in the topology tree and
 * returns its state. State of a non-leaf node needs to be calculated.
 ******************************************************************************/
unsigned short psci_get_state(aff_map_node_t *node)
{
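	/*
	 * Without coherent memory, flush the node first so that the state
	 * read below reflects any update made by a cpu whose data cache was
	 * off at the time of the write.
	 */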
#if !USE_COHERENT_MEM
	flush_dcache_range((uint64_t) node, sizeof(*node));
#endif

	assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);

	/* A cpu node just contains the state which can be directly returned */
	if (node->level == MPIDR_AFFLVL0)
		return (node->state >> PSCI_STATE_SHIFT) & PSCI_STATE_MASK;

	/*
	 * For an affinity level higher than a cpu, the state has to be
	 * calculated. It depends upon the value of the reference count
	 * which is managed by each node at the next lower affinity level
	 * e.g. for a cluster, each cpu increments/decrements the reference
	 * count. If the reference count is 0 then the affinity level is
	 * OFF else ON.
	 */
	if (node->ref_count)
		return PSCI_STATE_ON;
	else
		return PSCI_STATE_OFF;
}

/*******************************************************************************
 * This function takes a pointer to an affinity node in the topology tree and
 * a target state. State of a non-leaf node needs to be converted to a reference
 * count. State of a leaf node can be set directly.
 ******************************************************************************/
void psci_set_state(aff_map_node_t *node, unsigned short state)
{
	assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);

	/*
	 * For an affinity level higher than a cpu, the state is used
	 * to decide whether the reference count is incremented or
	 * decremented. Entry into the ON_PENDING state has no effect.
	 */
	if (node->level > MPIDR_AFFLVL0) {
		switch (state) {
		case PSCI_STATE_ON:
			node->ref_count++;
			break;
		case PSCI_STATE_OFF:
		case PSCI_STATE_SUSPEND:
			node->ref_count--;
			break;
		case PSCI_STATE_ON_PENDING:
			/*
			 * An affinity level higher than a cpu will not undergo
			 * a state change when it is about to be turned on
			 */
			return;
		default:
			assert(0);
		}
	} else {
		node->state &= ~(PSCI_STATE_MASK << PSCI_STATE_SHIFT);
		node->state |= (state & PSCI_STATE_MASK) << PSCI_STATE_SHIFT;
	}

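	/*
	 * Without coherent memory, flush the updated node so that cpus
	 * reading it with their data caches disabled observe the new state.
	 */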
#if !USE_COHERENT_MEM
	flush_dcache_range((uint64_t) node, sizeof(*node));
#endif
}

/*******************************************************************************
 * An affinity level could be on, on_pending, suspended or off. These are the
 * logical states it can be in. Physically either it is off or on. When it is in
 * the state on_pending then it is about to be turned on. It is not possible to
 * tell whether that has actually happened or not. So we err on the side of
 * caution & treat the affinity level as being turned off.
 ******************************************************************************/
unsigned short psci_get_phys_state(aff_map_node_t *node)
{
	unsigned int state;

	state = psci_get_state(node);
	return get_phys_state(state);
}

/*******************************************************************************
 * This function takes an array of pointers to affinity instance nodes in the
 * topology tree and calls the physical power on handler for the corresponding
 * affinity levels
 ******************************************************************************/
static void psci_call_power_on_handlers(aff_map_node_t *mpidr_nodes[],
					int start_afflvl,
					int end_afflvl,
					afflvl_power_on_finisher_t *pon_handlers)
{
	int level;
	aff_map_node_t *node;

	for (level = end_afflvl; level >= start_afflvl; level--) {
		node = mpidr_nodes[level];
		if (node == NULL)
			continue;

		/*
		 * There is no recovery path if anything goes wrong while
		 * powering up an affinity instance, so the handlers are
		 * expected to deal with any failure themselves.
		 */
		pon_handlers[level](node);
	}
}

/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses through all the affinity levels performing generic, architectural,
 * platform setup and state management e.g. for a cluster that's been powered
 * on, it will call the platform specific code which will enable coherency at
 * the interconnect level. For a cpu it could mean turning on the MMU etc.
 *
 * The state of all the relevant affinity levels is changed after calling the
 * affinity level specific handlers as their actions would depend upon the state
 * the affinity level is exiting from.
 *
 * The affinity level specific handlers are called in descending order i.e. from
 * the highest to the lowest affinity level implemented by the platform because
 * to turn on affinity level X it is necessary to turn on affinity level X + 1
 * first.
 ******************************************************************************/
void psci_afflvl_power_on_finish(int start_afflvl,
				 int end_afflvl,
				 afflvl_power_on_finisher_t *pon_handlers)
{
	mpidr_aff_map_nodes_t mpidr_nodes;
	int rc;
	unsigned int max_phys_off_afflvl;

	/*
	 * Collect the pointers to the nodes in the topology tree for
	 * each affinity instance in the mpidr. If this function does
	 * not return successfully then either the mpidr or the affinity
	 * levels are incorrect. Either case is an irrecoverable error.
	 */
	rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
				    start_afflvl,
				    end_afflvl,
				    mpidr_nodes);
	if (rc != PSCI_E_SUCCESS)
		panic();

	/*
	 * This function acquires the lock corresponding to each affinity
	 * level so that by the time all locks are taken, a snapshot of the
	 * system topology has been captured and state management can be done
	 * safely.
	 */
	psci_acquire_afflvl_locks(start_afflvl,
				  end_afflvl,
				  mpidr_nodes);

	max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
							    end_afflvl,
							    mpidr_nodes);
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);

	/*
	 * Stash the highest affinity level that will come out of the OFF or
	 * SUSPEND states.
	 */
	psci_set_max_phys_off_afflvl(max_phys_off_afflvl);

	/* Perform generic, architecture and platform specific handling */
	psci_call_power_on_handlers(mpidr_nodes,
				    start_afflvl,
				    end_afflvl,
				    pon_handlers);

	/*
	 * This function updates the state of each affinity instance
	 * corresponding to the mpidr in the range of affinity levels
	 * specified.
	 */
	psci_do_afflvl_state_mgmt(start_afflvl,
				  end_afflvl,
				  mpidr_nodes,
				  PSCI_STATE_ON);

	/*
	 * Invalidate the entry for the highest affinity level stashed earlier.
	 * This ensures that any reads of this variable outside the power
	 * up/down sequences return PSCI_INVALID_DATA
	 */
	psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);

	/*
	 * This function releases the lock corresponding to each affinity level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_afflvl_locks(start_afflvl,
				  end_afflvl,
				  mpidr_nodes);
}

/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of power
 * management operations. The power management hooks are expected to be provided
 * by the SPD, after it finishes all its initialization
 ******************************************************************************/
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
	assert(pm);
	psci_spd_pm = pm;

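	/*
	 * Advertise the optional migrate related PSCI calls only if the SPD
	 * provides the corresponding handlers.
	 */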
	if (pm->svc_migrate)
		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);

	if (pm->svc_migrate_info)
		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
				| define_psci_cap(PSCI_MIG_INFO_TYPE);
}

/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure Payload
 * is resident through the mpidr parameter. Else the value of the parameter on
 * return is undefined.
 ******************************************************************************/
int psci_spd_migrate_info(uint64_t *mpidr)
{
	int rc;

	if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
		return PSCI_E_NOT_SUPPORTED;

	rc = psci_spd_pm->svc_migrate_info(mpidr);

	assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \
		|| rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);

	return rc;
}

/*******************************************************************************
 * This function prints the state of all affinity instances present in the
 * system
 ******************************************************************************/
void psci_print_affinity_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
	aff_map_node_t *node;
	unsigned int idx;
	/* This array maps to the PSCI_STATE_X definitions in psci.h */
	static const char *psci_state_str[] = {
		"ON",
		"OFF",
		"ON_PENDING",
		"SUSPEND"
	};

	INFO("PSCI Affinity Map:\n");
	for (idx = 0; idx < PSCI_NUM_AFFS; idx++) {
		node = &psci_aff_map[idx];
		if (!(node->state & PSCI_AFF_PRESENT)) {
			continue;
		}
		INFO("  AffInst: Level %u, MPID 0x%lx, State %s\n",
		     node->level, node->mpidr,
		     psci_state_str[psci_get_state(node)]);
	}
#endif
}