/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <errata_report.h>
#include <platform.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * Per-cpu non-secure contexts used to program the architectural state prior
 * to return to the normal world.
 * TODO: Use the memory allocator to set aside memory for the contexts instead
 * of relying on platform defined constants.
 ******************************************************************************/
static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];

/******************************************************************************
 * Define the psci capability variable.
 *****************************************************************************/
unsigned int psci_caps;

/*******************************************************************************
 * Function which initializes the 'psci_non_cpu_pd_nodes' or the
 * 'psci_cpu_pd_nodes' corresponding to the power level.
 ******************************************************************************/
static void psci_init_pwr_domain_node(unsigned int node_idx,
					unsigned int parent_idx,
					unsigned int level)
{
	if (level > PSCI_CPU_PWR_LVL) {
		psci_non_cpu_pd_nodes[node_idx].level = level;
		psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
		psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
		psci_non_cpu_pd_nodes[node_idx].local_state =
						PLAT_MAX_OFF_STATE;
	} else {
		psci_cpu_data_t *svc_cpu_data;

		psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;

		/* Initialize with an invalid mpidr */
		psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;

		svc_cpu_data =
			&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);

		/* Set the Affinity Info for the cores as OFF */
		svc_cpu_data->aff_info_state = AFF_STATE_OFF;

		/* Invalidate the suspend level for the cpu */
		svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;

		/* Set the power state to OFF state */
		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;

		psci_flush_dcache_range((uintptr_t)svc_cpu_data,
						sizeof(*svc_cpu_data));

		cm_set_context_by_index(node_idx,
					(void *) &psci_ns_context[node_idx],
					NON_SECURE);
	}
}

/*******************************************************************************
 * This function updates the cpu_start_idx and ncpus fields for each node in
 * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
 * the CPUs and checking whether they match the parent of the previous CPU. The
 * basic assumption for this to work is that children of the same parent are
 * allocated adjacent indices. The platform should ensure this through proper
 * mapping of the CPUs to indices via the plat_core_pos_by_mpidr() and
 * plat_my_core_pos() APIs.
 ******************************************************************************/
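/*
 * Illustrative note only, not used by the code: assuming a hypothetical
 * platform with two clusters of two CPUs each, mapped to adjacent indices as
 * required above, this function would leave cluster 0's node with
 * cpu_start_idx = 0 and ncpus = 2, cluster 1's node with cpu_start_idx = 2 and
 * ncpus = 2, and the system node with cpu_start_idx = 0 and ncpus = 4.
 */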
static void psci_update_pwrlvl_limits(void)
{
	int j;
	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
	unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;

	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
		psci_get_parent_pwr_domain_nodes(cpu_idx,
						 PLAT_MAX_PWR_LVL,
						 temp_index);
		for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
			if (temp_index[j] != nodes_idx[j]) {
				nodes_idx[j] = temp_index[j];
				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
					= cpu_idx;
			}
			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
		}
	}
}

/*******************************************************************************
 * Core routine to populate the power domain tree. The tree descriptor passed by
 * the platform is populated breadth-first and the first entry in the map
 * informs the number of root power domains. The parent nodes of the root nodes
 * will point to an invalid entry (-1).
 ******************************************************************************/
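/*
 * Illustrative sketch only, not consumed by this file: a hypothetical platform
 * with one system power domain containing two clusters of two CPUs each would
 * export a breadth-first descriptor along these lines (the array name is
 * invented for the example):
 *
 *	static const unsigned char example_power_domain_tree_desc[] = {
 *		1,	number of root (system) power domains
 *		2,	number of children (clusters) of the system domain
 *		2,	number of CPUs in cluster 0
 *		2	number of CPUs in cluster 1
 *	};
 */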
static void populate_power_domain_tree(const unsigned char *topology)
{
	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
	unsigned int node_index = 0, parent_node_index = 0, num_children;
	int level = PLAT_MAX_PWR_LVL;

	/*
	 * For each level the inputs are:
	 * - number of nodes at this level in plat_array i.e. num_nodes_at_lvl
	 *   This is the sum of values of nodes at the parent level.
	 * - Index of first entry at this level in the plat_array i.e.
	 *   parent_node_index.
	 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
	 */
	while (level >= PSCI_CPU_PWR_LVL) {
		num_nodes_at_next_lvl = 0;
		/*
		 * For each entry (parent node) at this level in the plat_array:
		 * - Find the number of children
		 * - Allocate a node in a power domain array for each child
		 * - Set the parent of the child to the parent_node_index - 1
		 * - Increment parent_node_index to point to the next parent
		 * - Accumulate the number of children at next level.
		 */
		for (i = 0; i < num_nodes_at_lvl; i++) {
			assert(parent_node_index <=
					PSCI_NUM_NON_CPU_PWR_DOMAINS);
			num_children = topology[parent_node_index];

			for (j = node_index;
				j < node_index + num_children; j++)
				psci_init_pwr_domain_node(j,
							  parent_node_index - 1,
							  level);

			node_index = j;
			num_nodes_at_next_lvl += num_children;
			parent_node_index++;
		}

		num_nodes_at_lvl = num_nodes_at_next_lvl;
		level--;

		/* Reset the index for the cpu power domain array */
		if (level == PSCI_CPU_PWR_LVL)
			node_index = 0;
	}

	/* Validate the sanity of the array exported by the platform */
	assert(j == PLATFORM_CORE_COUNT);
}

/*******************************************************************************
 * This function does the architectural setup and takes the warm boot
 * entry-point `mailbox_ep` as an argument. The function also initializes the
 * power domain topology tree by querying the platform. The power domain nodes
 * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
 * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
 * exports its static topology map through the
 * plat_get_power_domain_tree_desc() API. The algorithm populates the
 * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
 * topology map. On a platform that implements two clusters of 2 cpus each,
 * and supports 3 domain levels, the populated psci_non_cpu_pd_nodes would
 * look like this:
 *
 * ---------------------------------------------------
 * | system node | cluster 0 node  | cluster 1 node  |
 * ---------------------------------------------------
 *
 * And the populated psci_cpu_pd_nodes would look like this:
 * <-  cpus cluster0 -><-  cpus cluster1 ->
 * ------------------------------------------------
 * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
 * ------------------------------------------------
 ******************************************************************************/
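/*
 * Illustrative sketch of the resulting links for the example above, not part
 * of the implementation: populate_power_domain_tree() would leave
 * psci_non_cpu_pd_nodes[0] as the system node with an invalid (-1) parent,
 * psci_non_cpu_pd_nodes[1] and [2] as the cluster nodes with parent index 0,
 * while psci_cpu_pd_nodes[0] and [1] would have parent index 1 (cluster 0) and
 * psci_cpu_pd_nodes[2] and [3] would have parent index 2 (cluster 1).
 */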
int psci_setup(const psci_lib_args_t *lib_args)
{
	const unsigned char *topology_tree;

	assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args));

	/* Do the Architectural initialization */
	psci_arch_setup();

	/* Query the topology map from the platform */
	topology_tree = plat_get_power_domain_tree_desc();

	/* Populate the power domain arrays using the platform topology map */
	populate_power_domain_tree(topology_tree);

	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
	psci_update_pwrlvl_limits();

	/* Populate the mpidr field of the cpu node for this CPU */
	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
		read_mpidr() & MPIDR_AFFINITY_MASK;

	psci_init_req_local_pwr_states();

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);

	plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep, &psci_plat_pm_ops);
	assert(psci_plat_pm_ops);

	/*
	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
	 * during warm boot, possibly before the data cache is enabled.
	 */
	psci_flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
					sizeof(psci_plat_pm_ops));

	/* Initialize the psci capability */
	psci_caps = PSCI_GENERIC_CAP;

	if (psci_plat_pm_ops->pwr_domain_off)
		psci_caps |= define_psci_cap(PSCI_CPU_OFF);
	if (psci_plat_pm_ops->pwr_domain_on &&
			psci_plat_pm_ops->pwr_domain_on_finish)
		psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
	if (psci_plat_pm_ops->pwr_domain_suspend &&
			psci_plat_pm_ops->pwr_domain_suspend_finish) {
		psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
		if (psci_plat_pm_ops->get_sys_suspend_power_state)
			psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
	if (psci_plat_pm_ops->system_off)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
	if (psci_plat_pm_ops->get_node_hw_state)
		psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);

#if ENABLE_PSCI_STAT
	psci_caps |= define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
	psci_caps |= define_psci_cap(PSCI_STAT_COUNT_AARCH64);
#endif

	return 0;
}

/*******************************************************************************
 * This duplicates what the primary cpu did after a cold boot in BL1. The same
 * needs to be done when a cpu is hotplugged in. This function could also
 * override any EL3 setup done by BL1 as this code resides in RW memory.
 ******************************************************************************/
void psci_arch_setup(void)
{
	/* Program the counter frequency */
	write_cntfrq_el0(plat_get_syscnt_freq2());

	/* Initialize the cpu_ops pointer. */
	init_cpu_ops();

	/* Having initialized cpu_ops, we can now print errata status */
	print_errata_status();
}

/******************************************************************************
 * PSCI Library interface to initialize the cpu context for the next
 * non-secure image during cold boot. The relevant registers in the cpu
 * context need to be retrieved and programmed on return from this interface.
 *****************************************************************************/
void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
{
	assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);
	cm_init_my_context(next_image_info);
	cm_prepare_el3_exit(NON_SECURE);
}