Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 1 | /* |
Jeenu Viswambharan | d5ec367 | 2017-01-03 11:01:51 +0000 | [diff] [blame] | 2 | * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved. |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 3 | * |
| 4 | * Redistribution and use in source and binary forms, with or without |
| 5 | * modification, are permitted provided that the following conditions are met: |
| 6 | * |
| 7 | * Redistributions of source code must retain the above copyright notice, this |
| 8 | * list of conditions and the following disclaimer. |
| 9 | * |
| 10 | * Redistributions in binary form must reproduce the above copyright notice, |
| 11 | * this list of conditions and the following disclaimer in the documentation |
| 12 | * and/or other materials provided with the distribution. |
| 13 | * |
| 14 | * Neither the name of ARM nor the names of its contributors may be used |
| 15 | * to endorse or promote products derived from this software without specific |
| 16 | * prior written permission. |
| 17 | * |
| 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| 19 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 21 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE |
| 22 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| 23 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 24 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 25 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 26 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 27 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| 28 | * POSSIBILITY OF SUCH DAMAGE. |
| 29 | */ |
| 30 | |
Dan Handley | 2bd4ef2 | 2014-04-09 13:14:54 +0100 | [diff] [blame] | 31 | #include <arch.h> |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 32 | #include <arch_helpers.h> |
Dan Handley | 2bd4ef2 | 2014-04-09 13:14:54 +0100 | [diff] [blame] | 33 | #include <assert.h> |
| 34 | #include <bl_common.h> |
| 35 | #include <context.h> |
Achin Gupta | ef7a28c | 2014-02-01 08:59:56 +0000 | [diff] [blame] | 36 | #include <context_mgmt.h> |
Jeenu Viswambharan | d5ec367 | 2017-01-03 11:01:51 +0000 | [diff] [blame] | 37 | #include <errata_report.h> |
Dan Handley | 2bd4ef2 | 2014-04-09 13:14:54 +0100 | [diff] [blame] | 38 | #include <platform.h> |
| 39 | #include <stddef.h> |
Dan Handley | 714a0d2 | 2014-04-09 13:13:04 +0100 | [diff] [blame] | 40 | #include "psci_private.h" |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 41 | |
/*******************************************************************************
 * Per-cpu non-secure contexts used to program the architectural state prior
 * to return to the normal world.
 * TODO: Use the memory allocator to set aside memory for the contexts instead
 * of relying on platform defined constants.
 ******************************************************************************/
static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
Achin Gupta | ef7a28c | 2014-02-01 08:59:56 +0000 | [diff] [blame] | 49 | |
/******************************************************************************
 * Define the psci capability variable: a bitmask of the optional PSCI
 * functions this platform supports, built up in psci_setup() from the
 * plat_psci_ops hooks the platform actually provides.
 *****************************************************************************/
unsigned int psci_caps;
Soby Mathew | 6cdddaf | 2015-01-07 11:10:22 +0000 | [diff] [blame] | 54 | |
Dan Handley | 60b13e3 | 2014-05-14 15:13:16 +0100 | [diff] [blame] | 55 | /******************************************************************************* |
Soby Mathew | 981487a | 2015-07-13 14:10:57 +0100 | [diff] [blame] | 56 | * Function which initializes the 'psci_non_cpu_pd_nodes' or the |
| 57 | * 'psci_cpu_pd_nodes' corresponding to the power level. |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 58 | ******************************************************************************/ |
Soby Mathew | 011ca18 | 2015-07-29 17:05:03 +0100 | [diff] [blame] | 59 | static void psci_init_pwr_domain_node(unsigned int node_idx, |
| 60 | unsigned int parent_idx, |
| 61 | unsigned int level) |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 62 | { |
Soby Mathew | 981487a | 2015-07-13 14:10:57 +0100 | [diff] [blame] | 63 | if (level > PSCI_CPU_PWR_LVL) { |
| 64 | psci_non_cpu_pd_nodes[node_idx].level = level; |
| 65 | psci_lock_init(psci_non_cpu_pd_nodes, node_idx); |
| 66 | psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx; |
| 67 | psci_non_cpu_pd_nodes[node_idx].local_state = |
| 68 | PLAT_MAX_OFF_STATE; |
| 69 | } else { |
| 70 | psci_cpu_data_t *svc_cpu_data; |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 71 | |
Soby Mathew | 981487a | 2015-07-13 14:10:57 +0100 | [diff] [blame] | 72 | psci_cpu_pd_nodes[node_idx].parent_node = parent_idx; |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 73 | |
Soby Mathew | 981487a | 2015-07-13 14:10:57 +0100 | [diff] [blame] | 74 | /* Initialize with an invalid mpidr */ |
| 75 | psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR; |
Soby Mathew | 2b69750 | 2014-10-02 17:24:19 +0100 | [diff] [blame] | 76 | |
Soby Mathew | 981487a | 2015-07-13 14:10:57 +0100 | [diff] [blame] | 77 | svc_cpu_data = |
| 78 | &(_cpu_data_by_index(node_idx)->psci_svc_cpu_data); |
Soby Mathew | 2b69750 | 2014-10-02 17:24:19 +0100 | [diff] [blame] | 79 | |
Soby Mathew | 981487a | 2015-07-13 14:10:57 +0100 | [diff] [blame] | 80 | /* Set the Affinity Info for the cores as OFF */ |
| 81 | svc_cpu_data->aff_info_state = AFF_STATE_OFF; |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 82 | |
Soby Mathew | 981487a | 2015-07-13 14:10:57 +0100 | [diff] [blame] | 83 | /* Invalidate the suspend level for the cpu */ |
Soby Mathew | 011ca18 | 2015-07-29 17:05:03 +0100 | [diff] [blame] | 84 | svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL; |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 85 | |
Soby Mathew | 981487a | 2015-07-13 14:10:57 +0100 | [diff] [blame] | 86 | /* Set the power state to OFF state */ |
| 87 | svc_cpu_data->local_state = PLAT_MAX_OFF_STATE; |
Soby Mathew | 2b69750 | 2014-10-02 17:24:19 +0100 | [diff] [blame] | 88 | |
Soby Mathew | 011ca18 | 2015-07-29 17:05:03 +0100 | [diff] [blame] | 89 | flush_dcache_range((uintptr_t)svc_cpu_data, |
Soby Mathew | 981487a | 2015-07-13 14:10:57 +0100 | [diff] [blame] | 90 | sizeof(*svc_cpu_data)); |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 91 | |
Soby Mathew | 981487a | 2015-07-13 14:10:57 +0100 | [diff] [blame] | 92 | cm_set_context_by_index(node_idx, |
| 93 | (void *) &psci_ns_context[node_idx], |
| 94 | NON_SECURE); |
| 95 | } |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 96 | } |
| 97 | |
/*******************************************************************************
 * This function updates the cpu_start_idx and ncpus fields for each node in
 * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
 * the CPUs and checking whether they match the parents of the previous
 * CPU. The basic assumption for this to work is that children of the same
 * parent are allocated adjacent indices. The platform should ensure this
 * through proper mapping of the CPUs to indices via plat_core_pos_by_mpidr()
 * and plat_my_core_pos() APIs.
 *******************************************************************************/
static void psci_update_pwrlvl_limits(void)
{
	int j;
	/*
	 * nodes_idx[j] tracks, per power level, the parent node index of the
	 * previously visited CPU; a change means a new sibling group starts.
	 * NOTE(review): it starts at {0}, so if the very first CPU's parent at
	 * some level is genuinely node 0, cpu_start_idx is never written for
	 * that node here — this appears to rely on the static
	 * psci_non_cpu_pd_nodes[] being zero-initialized so cpu_start_idx is
	 * already 0. TODO: confirm this invariant.
	 */
	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
	unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;

	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
		/* Collect this CPU's ancestor node indices at every level. */
		psci_get_parent_pwr_domain_nodes(cpu_idx,
						 PLAT_MAX_PWR_LVL,
						 temp_index);
		for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
			if (temp_index[j] != nodes_idx[j]) {
				/* New parent: this CPU opens its CPU range. */
				nodes_idx[j] = temp_index[j];
				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
					= cpu_idx;
			}
			/* Every ancestor gains one CPU in its subtree. */
			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
		}
	}
}
| 127 | |
/*******************************************************************************
 * Core routine to populate the power domain tree. The tree descriptor passed
 * by the platform is populated breadth-first and the first entry in the map
 * informs the number of root power domains. The parent nodes of the root
 * nodes will point to an invalid entry (-1).
 *
 * 'topology' is the flat byte array from plat_get_power_domain_tree_desc():
 * each entry is the number of children of one node, listed level by level
 * starting from the root.
 ******************************************************************************/
static void populate_power_domain_tree(const unsigned char *topology)
{
	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
	unsigned int node_index = 0, parent_node_index = 0, num_children;
	int level = PLAT_MAX_PWR_LVL;

	/*
	 * For each level the inputs are:
	 * - number of nodes at this level in plat_array i.e. num_nodes_at_level
	 *   This is the sum of values of nodes at the parent level.
	 * - Index of first entry at this level in the plat_array i.e.
	 *   parent_node_index.
	 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
	 */
	while (level >= PSCI_CPU_PWR_LVL) {
		num_nodes_at_next_lvl = 0;
		/*
		 * For each entry (parent node) at this level in the plat_array:
		 * - Find the number of children
		 * - Allocate a node in a power domain array for each child
		 * - Set the parent of the child to the parent_node_index - 1
		 * - Increment parent_node_index to point to the next parent
		 * - Accumulate the number of children at next level.
		 */
		for (i = 0; i < num_nodes_at_lvl; i++) {
			assert(parent_node_index <=
					PSCI_NUM_NON_CPU_PWR_DOMAINS);
			num_children = topology[parent_node_index];

			/*
			 * parent_node_index - 1 converts the 1-based walk of
			 * the descriptor into the node's 0-based index in
			 * psci_non_cpu_pd_nodes[] (the roots get -1/invalid).
			 */
			for (j = node_index;
				j < node_index + num_children; j++)
				psci_init_pwr_domain_node(j,
							  parent_node_index - 1,
							  level);

			node_index = j;
			num_nodes_at_next_lvl += num_children;
			parent_node_index++;
		}

		num_nodes_at_lvl = num_nodes_at_next_lvl;
		level--;

		/* Reset the index for the cpu power domain array */
		if (level == PSCI_CPU_PWR_LVL)
			node_index = 0;
	}

	/*
	 * Validate the sanity of the array exported by the platform: after the
	 * CPU level pass, 'j' holds one-past the last CPU node allocated, so
	 * it must equal the platform core count.
	 */
	assert(j == PLATFORM_CORE_COUNT);
}
| 186 | |
/*******************************************************************************
 * This function does the architectural setup and takes the warm boot
 * entry-point `mailbox_ep` (inside `lib_args`) as an argument. The function
 * also initializes the power domain topology tree by querying the platform.
 * The power domain nodes higher than the CPU are populated in the array
 * psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
 * psci_cpu_pd_nodes[]. The platform exports its static topology map through
 * the plat_get_power_domain_tree_desc() API. The algorithm populates the
 * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
 * topology map. On a platform that implements two clusters of 2 cpus each,
 * and supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would
 * look like this:
 *
 * ---------------------------------------------------
 * | system node | cluster 0 node  | cluster 1 node  |
 * ---------------------------------------------------
 *
 * And populated psci_cpu_pd_nodes would look like this :
 * <-    cpus cluster0   -><-   cpus cluster1   ->
 * ------------------------------------------------
 * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
 * ------------------------------------------------
 *
 * Returns 0 on success. The initialization steps below are order-dependent.
 ******************************************************************************/
int psci_setup(const psci_lib_args_t *lib_args)
{
	const unsigned char *topology_tree;

	/* Sanity-check the caller-supplied library arguments (v1 layout). */
	assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args));

	/* Do the Architectural initialization */
	psci_arch_setup();

	/* Query the topology map from the platform */
	topology_tree = plat_get_power_domain_tree_desc();

	/* Populate the power domain arrays using the platform topology map */
	populate_power_domain_tree(topology_tree);

	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
	psci_update_pwrlvl_limits();

	/* Populate the mpidr field of cpu node for this CPU */
	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
		read_mpidr() & MPIDR_AFFINITY_MASK;

	psci_init_req_local_pwr_states();

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);

	/* Let the platform fill in its PM hooks, passing the warm boot ep. */
	plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep, &psci_plat_pm_ops);
	assert(psci_plat_pm_ops);

	/*
	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
	 * during warm boot before data cache is enabled.
	 */
	flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
			   sizeof(psci_plat_pm_ops));

	/*
	 * Initialize the psci capability: mandatory functions are always
	 * advertised; each optional function is advertised only if the
	 * platform supplied the corresponding hook(s).
	 */
	psci_caps = PSCI_GENERIC_CAP;

	if (psci_plat_pm_ops->pwr_domain_off)
		psci_caps |= define_psci_cap(PSCI_CPU_OFF);
	if (psci_plat_pm_ops->pwr_domain_on &&
			psci_plat_pm_ops->pwr_domain_on_finish)
		psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
	if (psci_plat_pm_ops->pwr_domain_suspend &&
			psci_plat_pm_ops->pwr_domain_suspend_finish) {
		psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
		/* SYSTEM_SUSPEND additionally needs the suspend-state hook. */
		if (psci_plat_pm_ops->get_sys_suspend_power_state)
			psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
	if (psci_plat_pm_ops->system_off)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
	if (psci_plat_pm_ops->get_node_hw_state)
		psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);

#if ENABLE_PSCI_STAT
	psci_caps |= define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
	psci_caps |= define_psci_cap(PSCI_STAT_COUNT_AARCH64);
#endif

	return 0;
}
Soby Mathew | d019487 | 2016-04-29 19:01:30 +0100 | [diff] [blame] | 278 | |
/*******************************************************************************
 * This duplicates what the primary cpu did after a cold boot in BL1. The same
 * needs to be done when a cpu is hotplugged in. This function could also over-
 * ride any EL3 setup done by BL1 as this code resides in rw memory.
 * Note: the cpu_ops pointer must be initialized before the errata report below.
 ******************************************************************************/
void psci_arch_setup(void)
{
	/* Program the counter frequency */
	write_cntfrq_el0(plat_get_syscnt_freq2());

	/* Initialize the cpu_ops pointer. */
	init_cpu_ops();

	/* Having initialized cpu_ops, we can now print errata status */
	print_errata_status();
}
Soby Mathew | 89d90dc | 2016-05-05 14:11:23 +0100 | [diff] [blame] | 295 | |
/******************************************************************************
 * PSCI Library interface to initialize the cpu context for the next non-
 * secure image during cold boot. The relevant registers in the cpu context
 * need to be retrieved and programmed on return from this interface.
 * `next_image_info` must describe a NON_SECURE image (asserted below).
 *****************************************************************************/
void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
{
	assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);
	/* Set up this CPU's non-secure context and ready EL3 for exit to it. */
	cm_init_my_context(next_image_info);
	cm_prepare_el3_exit(NON_SECURE);
}