Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 1 | /* |
Dan Handley | e83b0ca | 2014-01-14 18:17:09 +0000 | [diff] [blame] | 2 | * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 3 | * |
| 4 | * Redistribution and use in source and binary forms, with or without |
| 5 | * modification, are permitted provided that the following conditions are met: |
| 6 | * |
| 7 | * Redistributions of source code must retain the above copyright notice, this |
| 8 | * list of conditions and the following disclaimer. |
| 9 | * |
| 10 | * Redistributions in binary form must reproduce the above copyright notice, |
| 11 | * this list of conditions and the following disclaimer in the documentation |
| 12 | * and/or other materials provided with the distribution. |
| 13 | * |
| 14 | * Neither the name of ARM nor the names of its contributors may be used |
| 15 | * to endorse or promote products derived from this software without specific |
| 16 | * prior written permission. |
| 17 | * |
| 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| 19 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 21 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE |
| 22 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| 23 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 24 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 25 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 26 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 27 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| 28 | * POSSIBILITY OF SUCH DAMAGE. |
| 29 | */ |
| 30 | |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 31 | #include <assert.h> |
Dan Handley | 2bd4ef2 | 2014-04-09 13:14:54 +0100 | [diff] [blame] | 32 | #include <bl_common.h> |
| 33 | #include <arch.h> |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 34 | #include <arch_helpers.h> |
Dan Handley | 2bd4ef2 | 2014-04-09 13:14:54 +0100 | [diff] [blame] | 35 | #include <context.h> |
Achin Gupta | ef7a28c | 2014-02-01 08:59:56 +0000 | [diff] [blame] | 36 | #include <context_mgmt.h> |
Dan Handley | bcd60ba | 2014-04-17 18:53:42 +0100 | [diff] [blame] | 37 | #include <runtime_svc.h> |
Dan Handley | 2bd4ef2 | 2014-04-09 13:14:54 +0100 | [diff] [blame] | 38 | #include <stddef.h> |
Dan Handley | 714a0d2 | 2014-04-09 13:13:04 +0100 | [diff] [blame] | 39 | #include "psci_private.h" |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 40 | |
/*
 * Signature shared by the per-affinity-level suspend handlers below.
 * Arguments: topology node for the level, non-secure entrypoint,
 * context id and the PSCI power_state parameter.
 */
typedef int (*afflvl_suspend_handler_t)(aff_map_node_t *,
					unsigned long,
					unsigned long,
					unsigned int);
| 45 | |
| 46 | /******************************************************************************* |
Vikram Kanigiri | f100f41 | 2014-04-01 19:26:26 +0100 | [diff] [blame] | 47 | * This function sets the power state of the current cpu while |
| 48 | * powering down during a cpu_suspend call |
Achin Gupta | a45e397 | 2013-12-05 15:10:48 +0000 | [diff] [blame] | 49 | ******************************************************************************/ |
Dan Handley | e2712bc | 2014-04-10 15:37:22 +0100 | [diff] [blame] | 50 | void psci_set_suspend_power_state(aff_map_node_t *node, unsigned int power_state) |
Achin Gupta | a45e397 | 2013-12-05 15:10:48 +0000 | [diff] [blame] | 51 | { |
| 52 | /* |
| 53 | * Check that nobody else is calling this function on our behalf & |
| 54 | * this information is being set only in the cpu node |
| 55 | */ |
| 56 | assert(node->mpidr == (read_mpidr() & MPIDR_AFFINITY_MASK)); |
| 57 | assert(node->level == MPIDR_AFFLVL0); |
| 58 | |
| 59 | /* |
Andrew Thoelke | e9a0d11 | 2014-06-20 00:38:03 +0100 | [diff] [blame] | 60 | * Save PSCI power state parameter for the core in suspend context. |
| 61 | * The node is in always-coherent RAM so it does not need to be flushed |
Achin Gupta | a45e397 | 2013-12-05 15:10:48 +0000 | [diff] [blame] | 62 | */ |
Andrew Thoelke | e9a0d11 | 2014-06-20 00:38:03 +0100 | [diff] [blame] | 63 | node->power_state = power_state; |
Achin Gupta | a45e397 | 2013-12-05 15:10:48 +0000 | [diff] [blame] | 64 | } |
| 65 | |
| 66 | /******************************************************************************* |
Vikram Kanigiri | f100f41 | 2014-04-01 19:26:26 +0100 | [diff] [blame] | 67 | * This function gets the affinity level till which a cpu is powered down |
| 68 | * during a cpu_suspend call. Returns PSCI_INVALID_DATA if the |
| 69 | * power state saved for the node is invalid |
| 70 | ******************************************************************************/ |
| 71 | int psci_get_suspend_afflvl(unsigned long mpidr) |
| 72 | { |
Dan Handley | e2712bc | 2014-04-10 15:37:22 +0100 | [diff] [blame] | 73 | aff_map_node_t *node; |
Vikram Kanigiri | f100f41 | 2014-04-01 19:26:26 +0100 | [diff] [blame] | 74 | |
| 75 | node = psci_get_aff_map_node(mpidr & MPIDR_AFFINITY_MASK, |
| 76 | MPIDR_AFFLVL0); |
| 77 | assert(node); |
| 78 | |
| 79 | return psci_get_aff_map_node_suspend_afflvl(node); |
| 80 | } |
| 81 | |
| 82 | |
| 83 | /******************************************************************************* |
Achin Gupta | a45e397 | 2013-12-05 15:10:48 +0000 | [diff] [blame] | 84 | * This function gets the affinity level till which the current cpu was powered |
Vikram Kanigiri | f100f41 | 2014-04-01 19:26:26 +0100 | [diff] [blame] | 85 | * down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the |
| 86 | * power state saved for the node is invalid |
| 87 | ******************************************************************************/ |
Dan Handley | e2712bc | 2014-04-10 15:37:22 +0100 | [diff] [blame] | 88 | int psci_get_aff_map_node_suspend_afflvl(aff_map_node_t *node) |
Vikram Kanigiri | f100f41 | 2014-04-01 19:26:26 +0100 | [diff] [blame] | 89 | { |
| 90 | unsigned int power_state; |
| 91 | |
| 92 | assert(node->level == MPIDR_AFFLVL0); |
| 93 | |
Andrew Thoelke | e9a0d11 | 2014-06-20 00:38:03 +0100 | [diff] [blame] | 94 | power_state = node->power_state; |
Vikram Kanigiri | f100f41 | 2014-04-01 19:26:26 +0100 | [diff] [blame] | 95 | return ((power_state == PSCI_INVALID_DATA) ? |
| 96 | power_state : psci_get_pstate_afflvl(power_state)); |
| 97 | } |
| 98 | |
| 99 | /******************************************************************************* |
| 100 | * This function gets the state id of a cpu stored in suspend context |
| 101 | * while powering down during a cpu_suspend call. Returns 0xFFFFFFFF |
| 102 | * if the power state saved for the node is invalid |
Achin Gupta | a45e397 | 2013-12-05 15:10:48 +0000 | [diff] [blame] | 103 | ******************************************************************************/ |
Vikram Kanigiri | f100f41 | 2014-04-01 19:26:26 +0100 | [diff] [blame] | 104 | int psci_get_suspend_stateid(unsigned long mpidr) |
Achin Gupta | a45e397 | 2013-12-05 15:10:48 +0000 | [diff] [blame] | 105 | { |
Dan Handley | e2712bc | 2014-04-10 15:37:22 +0100 | [diff] [blame] | 106 | aff_map_node_t *node; |
Vikram Kanigiri | f100f41 | 2014-04-01 19:26:26 +0100 | [diff] [blame] | 107 | unsigned int power_state; |
| 108 | |
| 109 | node = psci_get_aff_map_node(mpidr & MPIDR_AFFINITY_MASK, |
| 110 | MPIDR_AFFLVL0); |
| 111 | assert(node); |
| 112 | assert(node->level == MPIDR_AFFLVL0); |
| 113 | |
Andrew Thoelke | e9a0d11 | 2014-06-20 00:38:03 +0100 | [diff] [blame] | 114 | power_state = node->power_state; |
Vikram Kanigiri | f100f41 | 2014-04-01 19:26:26 +0100 | [diff] [blame] | 115 | return ((power_state == PSCI_INVALID_DATA) ? |
| 116 | power_state : psci_get_pstate_id(power_state)); |
Achin Gupta | a45e397 | 2013-12-05 15:10:48 +0000 | [diff] [blame] | 117 | } |
| 118 | |
| 119 | /******************************************************************************* |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 120 | * The next three functions implement a handler for each supported affinity |
| 121 | * level which is called when that affinity level is about to be suspended. |
| 122 | ******************************************************************************/ |
/*
 * Affinity level 0 (cpu) suspend handler. The sequence below is
 * order-critical: the SPD is notified and the NS context saved/flushed
 * BEFORE caches are disabled, since everything after the SCTLR_EL3 write
 * runs with the data cache off.
 */
static int psci_afflvl0_suspend(aff_map_node_t *cpu_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	unsigned int plat_state;
	unsigned long psci_entrypoint, sctlr;
	el3_state_t *saved_el3_state;
	uint32_t ns_scr_el3 = read_scr_el3();
	uint32_t ns_sctlr_el1 = read_sctlr_el1();
	int rc;

	/* Sanity check to safeguard against data corruption */
	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Save PSCI power state parameter for the core in suspend context */
	psci_set_suspend_power_state(cpu_node, power_state);

	/*
	 * Generic management: Store the re-entry information for the non-secure
	 * world and allow the secure world to suspend itself
	 */

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(power_state);

	/* State management: mark this cpu as suspended */
	psci_set_state(cpu_node, PSCI_STATE_SUSPEND);

	/*
	 * Generic management: Store the re-entry information for the
	 * non-secure world
	 */
	rc = psci_save_ns_entry(read_mpidr_el1(), ns_entrypoint, context_id,
				ns_scr_el3, ns_sctlr_el1);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * Arch. management: Save the EL3 state in the 'cpu_context'
	 * structure that has been allocated for this cpu, flush the
	 * L1 caches and exit intra-cluster coherency et al
	 */
	cm_el3_sysregs_context_save(NON_SECURE);

	/*
	 * Flush the EL3 state to PoC since it will be accessed after a
	 * reset with the caches turned off
	 */
	saved_el3_state = get_el3state_ctx(cm_get_context(NON_SECURE));
	flush_dcache_range((uint64_t) saved_el3_state, sizeof(*saved_el3_state));

	/* Set the secure world (EL3) re-entry point after BL1 */
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;

	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches.
	 *
	 * TODO: This power down sequence varies across cpus so it needs to be
	 * abstracted out on the basis of the MIDR like in cpu_reset_handler().
	 * Do the bare minimal for the time being. Fix this before porting to
	 * Cortex models.
	 */
	sctlr = read_sctlr_el3();
	sctlr &= ~SCTLR_C_BIT;
	write_sctlr_el3(sctlr);
	isb();	/* ensure MMU disable takes immediate effect */

	/*
	 * CAUTION: This flush to the level of unification makes an assumption
	 * about the cache hierarchy at affinity level 0 (cpu) in the platform.
	 * Ideally the platform should tell psci which levels to flush to exit
	 * coherency.
	 */
	dcsw_op_louis(DCCISW);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	rc = PSCI_E_SUCCESS;

	if (psci_plat_pm_ops->affinst_suspend) {
		plat_state = psci_get_phys_state(cpu_node);
		rc = psci_plat_pm_ops->affinst_suspend(read_mpidr_el1(),
						       psci_entrypoint,
						       ns_entrypoint,
						       cpu_node->level,
						       plat_state);
	}

	return rc;
}
| 224 | |
/*
 * Affinity level 1 (cluster) suspend handler. Note that the cluster state
 * is updated BEFORE its physical state is read: the physical state depends
 * on the states of all cpus in the cluster, including the one just marked
 * suspended.
 */
static int psci_afflvl1_suspend(aff_map_node_t *cluster_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int plat_state;
	unsigned long psci_entrypoint;

	/* Sanity check the cluster level */
	assert(cluster_node->level == MPIDR_AFFLVL1);

	/* State management: Decrement the cluster reference count */
	psci_set_state(cluster_node, PSCI_STATE_SUSPEND);

	/*
	 * Keep the physical state of this cluster handy to decide
	 * what action needs to be taken
	 */
	plat_state = psci_get_phys_state(cluster_node);

	/*
	 * Arch. management: Flush all levels of caches to PoC if the
	 * cluster is to be shutdown
	 */
	if (plat_state == PSCI_STATE_OFF)
		dcsw_op_all(DCCISW);

	/*
	 * Plat. Management. Allow the platform to do its cluster
	 * specific bookkeeping e.g. turn off interconnect coherency,
	 * program the power controller etc.
	 */
	if (psci_plat_pm_ops->affinst_suspend) {

		/*
		 * Sending the psci entrypoint is currently redundant
		 * beyond affinity level 0 but one never knows what a
		 * platform might do. Also it allows us to keep the
		 * platform handler prototype the same.
		 */
		psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
		rc = psci_plat_pm_ops->affinst_suspend(read_mpidr_el1(),
						       psci_entrypoint,
						       ns_entrypoint,
						       cluster_node->level,
						       plat_state);
	}

	return rc;
}
| 276 | |
| 277 | |
/*
 * Affinity level 2 (system) suspend handler — the highest level this
 * implementation supports. Mirrors the cluster handler minus the cache
 * flush (already done at lower levels if required).
 */
static int psci_afflvl2_suspend(aff_map_node_t *system_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int plat_state;
	unsigned long psci_entrypoint;

	/* Cannot go beyond this */
	assert(system_node->level == MPIDR_AFFLVL2);

	/* State management: Decrement the system reference count */
	psci_set_state(system_node, PSCI_STATE_SUSPEND);

	/*
	 * Keep the physical state of the system handy to decide what
	 * action needs to be taken
	 */
	plat_state = psci_get_phys_state(system_node);

	/*
	 * Plat. Management : Allow the platform to do its bookkeeping
	 * at this affinity level
	 */
	if (psci_plat_pm_ops->affinst_suspend) {

		/*
		 * Sending the psci entrypoint is currently redundant
		 * beyond affinity level 0 but one never knows what a
		 * platform might do. Also it allows us to keep the
		 * platform handler prototype the same.
		 */
		psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
		rc = psci_plat_pm_ops->affinst_suspend(read_mpidr_el1(),
						       psci_entrypoint,
						       ns_entrypoint,
						       system_node->level,
						       plat_state);
	}

	return rc;
}
| 321 | |
/*
 * Per-level suspend handlers, indexed by affinity level (MPIDR_AFFLVL0..2).
 * The order must match the affinity level values.
 */
static const afflvl_suspend_handler_t psci_afflvl_suspend_handlers[] = {
	psci_afflvl0_suspend,
	psci_afflvl1_suspend,
	psci_afflvl2_suspend,
};
| 327 | |
| 328 | /******************************************************************************* |
Achin Gupta | 0959db5 | 2013-12-02 17:33:04 +0000 | [diff] [blame] | 329 | * This function takes an array of pointers to affinity instance nodes in the |
| 330 | * topology tree and calls the suspend handler for the corresponding affinity |
| 331 | * levels |
| 332 | ******************************************************************************/ |
Dan Handley | e2712bc | 2014-04-10 15:37:22 +0100 | [diff] [blame] | 333 | static int psci_call_suspend_handlers(mpidr_aff_map_nodes_t mpidr_nodes, |
Achin Gupta | 0959db5 | 2013-12-02 17:33:04 +0000 | [diff] [blame] | 334 | int start_afflvl, |
| 335 | int end_afflvl, |
Achin Gupta | 0959db5 | 2013-12-02 17:33:04 +0000 | [diff] [blame] | 336 | unsigned long entrypoint, |
| 337 | unsigned long context_id, |
| 338 | unsigned int power_state) |
| 339 | { |
| 340 | int rc = PSCI_E_INVALID_PARAMS, level; |
Dan Handley | e2712bc | 2014-04-10 15:37:22 +0100 | [diff] [blame] | 341 | aff_map_node_t *node; |
Achin Gupta | 0959db5 | 2013-12-02 17:33:04 +0000 | [diff] [blame] | 342 | |
| 343 | for (level = start_afflvl; level <= end_afflvl; level++) { |
| 344 | node = mpidr_nodes[level]; |
| 345 | if (node == NULL) |
| 346 | continue; |
| 347 | |
| 348 | /* |
| 349 | * TODO: In case of an error should there be a way |
| 350 | * of restoring what we might have torn down at |
| 351 | * lower affinity levels. |
| 352 | */ |
Andrew Thoelke | 2bc0785 | 2014-06-09 12:44:21 +0100 | [diff] [blame] | 353 | rc = psci_afflvl_suspend_handlers[level](node, |
Achin Gupta | 0959db5 | 2013-12-02 17:33:04 +0000 | [diff] [blame] | 354 | entrypoint, |
| 355 | context_id, |
| 356 | power_state); |
| 357 | if (rc != PSCI_E_SUCCESS) |
| 358 | break; |
| 359 | } |
| 360 | |
| 361 | return rc; |
| 362 | } |
| 363 | |
| 364 | /******************************************************************************* |
| 365 | * Top level handler which is called when a cpu wants to suspend its execution. |
| 366 | * It is assumed that along with turning the cpu off, higher affinity levels |
| 367 | * until the target affinity level will be turned off as well. It traverses |
| 368 | * through all the affinity levels performing generic, architectural, platform |
| 369 | * setup and state management e.g. for a cluster that's to be suspended, it will |
| 370 | * call the platform specific code which will disable coherency at the |
| 371 | * interconnect level if the cpu is the last in the cluster. For a cpu it could |
| 372 | * mean programming the power controller etc. |
| 373 | * |
| 374 | * The state of all the relevant affinity levels is changed prior to calling the |
| 375 | * affinity level specific handlers as their actions would depend upon the state |
| 376 | * the affinity level is about to enter. |
| 377 | * |
| 378 | * The affinity level specific handlers are called in ascending order i.e. from |
| 379 | * the lowest to the highest affinity level implemented by the platform because |
| 380 | * to turn off affinity level X it is neccesary to turn off affinity level X - 1 |
| 381 | * first. |
| 382 | * |
| 383 | * CAUTION: This function is called with coherent stacks so that coherency can |
| 384 | * be turned off and caches can be flushed safely. |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 385 | ******************************************************************************/ |
/*
 * Top level cpu_suspend handler (see the block comment above for the full
 * contract). Protocol: resolve the topology nodes, take the per-level locks
 * in ascending order, run the per-level handlers, then release the locks in
 * reverse order. Called on a coherent stack so caches can be turned off
 * safely by the level 0 handler.
 */
int psci_afflvl_suspend(unsigned long entrypoint,
			unsigned long context_id,
			unsigned int power_state,
			int start_afflvl,
			int end_afflvl)
{
	int rc = PSCI_E_SUCCESS;
	mpidr_aff_map_nodes_t mpidr_nodes;

	/*
	 * Collect the pointers to the nodes in the topology tree for
	 * each affinity instance in the mpidr. If this function does
	 * not return successfully then either the mpidr or the affinity
	 * levels are incorrect.
	 */
	rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
				    start_afflvl,
				    end_afflvl,
				    mpidr_nodes);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * This function acquires the lock corresponding to each affinity
	 * level so that by the time all locks are taken, the system topology
	 * is snapshot and state management can be done safely.
	 */
	psci_acquire_afflvl_locks(start_afflvl,
				  end_afflvl,
				  mpidr_nodes);

	/* Perform generic, architecture and platform specific handling */
	rc = psci_call_suspend_handlers(mpidr_nodes,
					start_afflvl,
					end_afflvl,
					entrypoint,
					context_id,
					power_state);

	/*
	 * Release the locks corresponding to each affinity level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_afflvl_locks(start_afflvl,
				  end_afflvl,
				  mpidr_nodes);

	return rc;
}
| 435 | |
| 436 | /******************************************************************************* |
| 437 | * The following functions finish an earlier affinity suspend request. They |
| 438 | * are called by the common finisher routine in psci_common.c. |
| 439 | ******************************************************************************/ |
/*
 * Affinity level 0 (cpu) suspend-finish handler, run on warm boot after the
 * cpu wakes from a suspended state. Undoes the level 0 suspend handler's
 * work in roughly reverse order: platform wakeup actions, EL3 context
 * restore, SPD notification, then state and suspend-context invalidation.
 */
static unsigned int psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
{
	unsigned int plat_state, state, rc;
	int32_t suspend_level;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Ensure we have been woken up from a suspended state */
	state = psci_get_state(cpu_node);
	assert(state == PSCI_STATE_SUSPEND);

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	if (psci_plat_pm_ops->affinst_suspend_finish) {

		/* Get the physical state of this cpu */
		plat_state = get_phys_state(state);
		rc = psci_plat_pm_ops->affinst_suspend_finish(read_mpidr_el1(),
							      cpu_node->level,
							      plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	/*
	 * Arch. management: Restore the stashed EL3 architectural
	 * context from the 'cpu_context' structure for this cpu.
	 */
	cm_el3_sysregs_context_restore(NON_SECURE);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend) {
		suspend_level = psci_get_aff_map_node_suspend_afflvl(cpu_node);
		assert (suspend_level != PSCI_INVALID_DATA);
		psci_spd_pm->svc_suspend_finish(suspend_level);
	}

	/* Invalidate the suspend context for the node */
	psci_set_suspend_power_state(cpu_node, PSCI_INVALID_DATA);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* State management: mark this cpu as on */
	psci_set_state(cpu_node, PSCI_STATE_ON);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);

	rc = PSCI_E_SUCCESS;
	return rc;
}
| 505 | |
Andrew Thoelke | 2bc0785 | 2014-06-09 12:44:21 +0100 | [diff] [blame] | 506 | static unsigned int psci_afflvl1_suspend_finish(aff_map_node_t *cluster_node) |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 507 | { |
Achin Gupta | 0959db5 | 2013-12-02 17:33:04 +0000 | [diff] [blame] | 508 | unsigned int plat_state, rc = PSCI_E_SUCCESS; |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 509 | |
| 510 | assert(cluster_node->level == MPIDR_AFFLVL1); |
| 511 | |
| 512 | /* |
| 513 | * Plat. management: Perform the platform specific actions |
| 514 | * as per the old state of the cluster e.g. enabling |
| 515 | * coherency at the interconnect depends upon the state with |
| 516 | * which this cluster was powered up. If anything goes wrong |
| 517 | * then assert as there is no way to recover from this |
| 518 | * situation. |
| 519 | */ |
| 520 | if (psci_plat_pm_ops->affinst_suspend_finish) { |
Achin Gupta | 0959db5 | 2013-12-02 17:33:04 +0000 | [diff] [blame] | 521 | |
| 522 | /* Get the physical state of this cpu */ |
Achin Gupta | 75f7367 | 2013-12-05 16:33:10 +0000 | [diff] [blame] | 523 | plat_state = psci_get_phys_state(cluster_node); |
Andrew Thoelke | 2bc0785 | 2014-06-09 12:44:21 +0100 | [diff] [blame] | 524 | rc = psci_plat_pm_ops->affinst_suspend_finish(read_mpidr_el1(), |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 525 | cluster_node->level, |
| 526 | plat_state); |
| 527 | assert(rc == PSCI_E_SUCCESS); |
| 528 | } |
| 529 | |
Achin Gupta | 75f7367 | 2013-12-05 16:33:10 +0000 | [diff] [blame] | 530 | /* State management: Increment the cluster reference count */ |
| 531 | psci_set_state(cluster_node, PSCI_STATE_ON); |
| 532 | |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 533 | return rc; |
| 534 | } |
| 535 | |
| 536 | |
Andrew Thoelke | 2bc0785 | 2014-06-09 12:44:21 +0100 | [diff] [blame] | 537 | static unsigned int psci_afflvl2_suspend_finish(aff_map_node_t *system_node) |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 538 | { |
Achin Gupta | 0959db5 | 2013-12-02 17:33:04 +0000 | [diff] [blame] | 539 | unsigned int plat_state, rc = PSCI_E_SUCCESS;; |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 540 | |
| 541 | /* Cannot go beyond this affinity level */ |
| 542 | assert(system_node->level == MPIDR_AFFLVL2); |
| 543 | |
| 544 | /* |
| 545 | * Currently, there are no architectural actions to perform |
| 546 | * at the system level. |
| 547 | */ |
| 548 | |
| 549 | /* |
| 550 | * Plat. management: Perform the platform specific actions |
| 551 | * as per the old state of the cluster e.g. enabling |
| 552 | * coherency at the interconnect depends upon the state with |
| 553 | * which this cluster was powered up. If anything goes wrong |
| 554 | * then assert as there is no way to recover from this |
| 555 | * situation. |
| 556 | */ |
| 557 | if (psci_plat_pm_ops->affinst_suspend_finish) { |
Achin Gupta | 0959db5 | 2013-12-02 17:33:04 +0000 | [diff] [blame] | 558 | |
| 559 | /* Get the physical state of the system */ |
Achin Gupta | 75f7367 | 2013-12-05 16:33:10 +0000 | [diff] [blame] | 560 | plat_state = psci_get_phys_state(system_node); |
Andrew Thoelke | 2bc0785 | 2014-06-09 12:44:21 +0100 | [diff] [blame] | 561 | rc = psci_plat_pm_ops->affinst_suspend_finish(read_mpidr_el1(), |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 562 | system_node->level, |
| 563 | plat_state); |
| 564 | assert(rc == PSCI_E_SUCCESS); |
| 565 | } |
| 566 | |
Achin Gupta | 75f7367 | 2013-12-05 16:33:10 +0000 | [diff] [blame] | 567 | /* State management: Increment the system reference count */ |
| 568 | psci_set_state(system_node, PSCI_STATE_ON); |
| 569 | |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 570 | return rc; |
| 571 | } |
| 572 | |
Dan Handley | e2712bc | 2014-04-10 15:37:22 +0100 | [diff] [blame] | 573 | const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = { |
Achin Gupta | 4f6ad66 | 2013-10-25 09:08:21 +0100 | [diff] [blame] | 574 | psci_afflvl0_suspend_finish, |
| 575 | psci_afflvl1_suspend_finish, |
| 576 | psci_afflvl2_suspend_finish, |
| 577 | }; |
| 578 | |