/*
 * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * Per cpu non-secure contexts used to program the architectural state prior
 * to return to the normal world.
 * TODO: Use the memory allocator to set aside memory for the contexts instead
 * of relying on platform defined constants.
 ******************************************************************************/
static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];

/******************************************************************************
 * Define the psci capability variable.
 *****************************************************************************/
unsigned int psci_caps;

/*******************************************************************************
 * Function which initializes the 'psci_non_cpu_pd_nodes' or the
 * 'psci_cpu_pd_nodes' corresponding to the power level.
 ******************************************************************************/
static void psci_init_pwr_domain_node(unsigned int node_idx,
					unsigned int parent_idx,
					unsigned int level)
{
	if (level > PSCI_CPU_PWR_LVL) {
		psci_non_cpu_pd_nodes[node_idx].level = level;
		psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
		psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
		psci_non_cpu_pd_nodes[node_idx].local_state =
						PLAT_MAX_OFF_STATE;
	} else {
		psci_cpu_data_t *svc_cpu_data;

		psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;

		/* Initialize with an invalid mpidr */
		psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;

		svc_cpu_data =
			&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);

		/* Set the Affinity Info for the cores as OFF */
		svc_cpu_data->aff_info_state = AFF_STATE_OFF;

		/* Invalidate the suspend level for the cpu */
		svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;

		/* Set the power state to OFF state */
		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;

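		/*
		 * Flush the per-cpu PSCI data so that it is visible to
		 * other CPUs which may access it with the data cache
		 * disabled during early warm boot.
		 */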
		flush_dcache_range((uintptr_t)svc_cpu_data,
						sizeof(*svc_cpu_data));

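		/*
		 * Associate the statically allocated non-secure context for
		 * this cpu index with the context management library.
		 */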
		cm_set_context_by_index(node_idx,
					(void *) &psci_ns_context[node_idx],
					NON_SECURE);
	}
}

/*******************************************************************************
 * This function updates the cpu_start_idx and ncpus fields for each node in
 * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each
 * CPU with those of the previous CPU. The basic assumption for this to work
 * is that children of the same parent are allocated adjacent indices. The
 * platform should ensure this through proper mapping of the CPUs to indices
 * via the plat_core_pos_by_mpidr() and plat_my_core_pos() APIs.
 ******************************************************************************/
static void psci_update_pwrlvl_limits(void)
{
	int j;
	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
	unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;

	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
		psci_get_parent_pwr_domain_nodes(cpu_idx,
						 PLAT_MAX_PWR_LVL,
						 temp_index);
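
		/*
		 * Walk the ancestor nodes of this CPU at each power level.
		 * When an ancestor differs from that of the previous CPU,
		 * this CPU is the first child of a new parent, so record it
		 * as the parent's cpu_start_idx. In either case account for
		 * this CPU in the ancestor's ncpus count.
		 */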
		for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
			if (temp_index[j] != nodes_idx[j]) {
				nodes_idx[j] = temp_index[j];
				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
					= cpu_idx;
			}
			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
		}
	}
}

/*******************************************************************************
 * Core routine to populate the power domain tree. The power domain arrays are
 * populated breadth-first from the tree descriptor passed by the platform;
 * the first entry in the descriptor gives the number of root power domains.
 * The parent node of a root power domain points to an invalid entry (-1).
 ******************************************************************************/
static void populate_power_domain_tree(const unsigned char *topology)
{
	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
	unsigned int node_index = 0, parent_node_index = 0, num_children;
	int level = PLAT_MAX_PWR_LVL;

	/*
	 * For each level the inputs are:
	 * - number of nodes at this level in plat_array i.e. num_nodes_at_lvl.
	 *   This is the sum of values of the nodes at the parent level.
	 * - Index of first entry at this level in the plat_array i.e.
	 *   parent_node_index.
	 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
	 */
	while (level >= PSCI_CPU_PWR_LVL) {
		num_nodes_at_next_lvl = 0;
		/*
		 * For each entry (parent node) at this level in the plat_array:
		 * - Find the number of children
		 * - Allocate a node in a power domain array for each child
		 * - Set the parent of the child to the parent_node_index - 1
		 * - Increment parent_node_index to point to the next parent
		 * - Accumulate the number of children at next level.
		 */
		for (i = 0; i < num_nodes_at_lvl; i++) {
			assert(parent_node_index <=
					PSCI_NUM_NON_CPU_PWR_DOMAINS);
			num_children = topology[parent_node_index];

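			/*
			 * Initialise a power domain node for each child. Each
			 * child records (parent_node_index - 1) as its parent,
			 * i.e. the parent's index in psci_non_cpu_pd_nodes[],
			 * or -1 for the root power domains.
			 */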
			for (j = node_index;
				j < node_index + num_children; j++)
				psci_init_pwr_domain_node(j,
							  parent_node_index - 1,
							  level);

			node_index = j;
			num_nodes_at_next_lvl += num_children;
			parent_node_index++;
		}

		num_nodes_at_lvl = num_nodes_at_next_lvl;
		level--;

		/* Reset the index for the cpu power domain array */
		if (level == PSCI_CPU_PWR_LVL)
			node_index = 0;
	}

	/* Validate the sanity of the array exported by the platform */
	assert(j == PLATFORM_CORE_COUNT);
}

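/*
 * Illustrative sketch only (not used by the code): for the hypothetical
 * two-cluster, two-CPUs-per-cluster topology described in the comment below,
 * the descriptor returned by plat_get_power_domain_tree_desc() could look
 * like the following; the array name is made up for illustration.
 *
 *	static const unsigned char plat_power_domain_tree_desc[] = {
 *		1,	(one root system power domain)
 *		2,	(two clusters under the system node)
 *		2, 2	(two CPUs under each cluster)
 *	};
 */
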
/*******************************************************************************
 * This function does the architectural setup and takes the warm boot
 * entry-point `mailbox_ep` (passed in `lib_args`) as an argument. It also
 * initializes the power domain topology tree by querying the platform. The
 * power domain nodes higher than the CPU are populated in the array
 * psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
 * psci_cpu_pd_nodes[]. The platform exports its static topology map through
 * the plat_get_power_domain_tree_desc() API. The algorithm populates the
 * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
 * topology map. On a platform that implements two clusters of 2 cpus each,
 * and supports 3 domain levels, the populated psci_non_cpu_pd_nodes would
 * look like this:
 *
 * ---------------------------------------------------
 * | system node | cluster 0 node  | cluster 1 node  |
 * ---------------------------------------------------
 *
 * And the populated psci_cpu_pd_nodes would look like this:
 * <-    cpus cluster0   -><-    cpus cluster1   ->
 * ------------------------------------------------
 * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
 * ------------------------------------------------
 ******************************************************************************/
int psci_setup(const psci_lib_args_t *lib_args)
{
	const unsigned char *topology_tree;

	assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args));

	/* Do the Architectural initialization */
	psci_arch_setup();

	/* Query the topology map from the platform */
	topology_tree = plat_get_power_domain_tree_desc();

	/* Populate the power domain arrays using the platform topology map */
	populate_power_domain_tree(topology_tree);

	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
	psci_update_pwrlvl_limits();

	/* Populate the mpidr field of cpu node for this CPU */
	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
		read_mpidr() & MPIDR_AFFINITY_MASK;

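	/*
	 * Initialize the table of requested local power states used by PSCI
	 * to coordinate power states across the power domain levels.
	 */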
	psci_init_req_local_pwr_states();

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);

	plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep, &psci_plat_pm_ops);
	assert(psci_plat_pm_ops);

	/*
	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
	 * during warm boot before the data cache is enabled.
	 */
	flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
					sizeof(psci_plat_pm_ops));

	/* Initialize the psci capability */
	psci_caps = PSCI_GENERIC_CAP;

	if (psci_plat_pm_ops->pwr_domain_off)
		psci_caps |= define_psci_cap(PSCI_CPU_OFF);
	if (psci_plat_pm_ops->pwr_domain_on &&
			psci_plat_pm_ops->pwr_domain_on_finish)
		psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
	if (psci_plat_pm_ops->pwr_domain_suspend &&
			psci_plat_pm_ops->pwr_domain_suspend_finish) {
		psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
		if (psci_plat_pm_ops->get_sys_suspend_power_state)
			psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
	if (psci_plat_pm_ops->system_off)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
	if (psci_plat_pm_ops->get_node_hw_state)
		psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);

#if ENABLE_PSCI_STAT
	psci_caps |= define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
	psci_caps |= define_psci_cap(PSCI_STAT_COUNT_AARCH64);
#endif

	return 0;
}

/*******************************************************************************
 * This duplicates what the primary cpu did after a cold boot in BL1. The same
 * needs to be done when a cpu is hotplugged in. This function could also
 * override any EL3 setup done by BL1 as this code resides in rw memory.
 ******************************************************************************/
void psci_arch_setup(void)
{
	/* Program the counter frequency */
	write_cntfrq_el0(plat_get_syscnt_freq2());

	/* Initialize the cpu_ops pointer. */
	init_cpu_ops();
}

/******************************************************************************
 * PSCI Library interface to initialize the cpu context for the next
 * non-secure image during cold boot. The relevant registers in the cpu
 * context need to be retrieved and programmed on return from this interface.
 *****************************************************************************/
void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
{
	assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);
	cm_init_my_context(next_image_info);
	cm_prepare_el3_exit(NON_SECURE);
}
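
/*
 * Illustrative (hypothetical) caller sketch: an EL3 runtime image hosting the
 * PSCI library might use the interface above roughly as follows during cold
 * boot, before exiting to the non-secure world. The helper used to obtain the
 * entry point info here is an assumption for illustration only.
 *
 *	entry_point_info_t *ns_ep = bl31_plat_get_next_image_ep_info(NON_SECURE);
 *
 *	assert(ns_ep);
 *	psci_prepare_next_non_secure_ctx(ns_ep);
 *	(then program the prepared context registers and perform the EL3 exit)
 */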