Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 1 | /* |
Zelalem Aweke | f92c0cb | 2022-01-31 16:59:42 -0600 | [diff] [blame] | 2 | * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved. |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 3 | * |
dp-arm | fa3cf0b | 2017-05-03 09:38:09 +0100 | [diff] [blame] | 4 | * SPDX-License-Identifier: BSD-3-Clause |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 5 | */ |
| 6 | |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 7 | #include <assert.h> |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 8 | #include <stddef.h> |
Antonio Nino Diaz | e0f9063 | 2018-12-14 00:18:21 +0000 | [diff] [blame] | 9 | |
| 10 | #include <arch.h> |
| 11 | #include <arch_helpers.h> |
| 12 | #include <common/bl_common.h> |
| 13 | #include <common/debug.h> |
| 14 | #include <lib/el3_runtime/context_mgmt.h> |
| 15 | #include <lib/el3_runtime/pubsub_events.h> |
| 16 | #include <plat/common/platform.h> |
| 17 | |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 18 | #include "psci_private.h" |
| 19 | |
/*
 * Helper functions for the CPU level spinlocks
 */

/*
 * Acquire the per-CPU spinlock embedded in the power domain node of the
 * CPU at linear index `idx`. Used to serialise CPU_ON requests targeting
 * the same CPU and to synchronise with the target's power-up path.
 */
static inline void psci_spin_lock_cpu(unsigned int idx)
{
	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock);
}
| 27 | |
/*
 * Release the per-CPU spinlock embedded in the power domain node of the
 * CPU at linear index `idx`.
 */
static inline void psci_spin_unlock_cpu(unsigned int idx)
{
	spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock);
}
| 32 | |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 33 | /******************************************************************************* |
| 34 | * This function checks whether a cpu which has been requested to be turned on |
| 35 | * is OFF to begin with. |
| 36 | ******************************************************************************/ |
Soby Mathew | 85dbf5a | 2015-04-07 12:16:56 +0100 | [diff] [blame] | 37 | static int cpu_on_validate_state(aff_info_state_t aff_state) |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 38 | { |
Soby Mathew | 85dbf5a | 2015-04-07 12:16:56 +0100 | [diff] [blame] | 39 | if (aff_state == AFF_STATE_ON) |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 40 | return PSCI_E_ALREADY_ON; |
| 41 | |
Soby Mathew | 85dbf5a | 2015-04-07 12:16:56 +0100 | [diff] [blame] | 42 | if (aff_state == AFF_STATE_ON_PENDING) |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 43 | return PSCI_E_ON_PENDING; |
| 44 | |
Soby Mathew | 85dbf5a | 2015-04-07 12:16:56 +0100 | [diff] [blame] | 45 | assert(aff_state == AFF_STATE_OFF); |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 46 | return PSCI_E_SUCCESS; |
| 47 | } |
| 48 | |
| 49 | /******************************************************************************* |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 50 | * Generic handler which is called to physically power on a cpu identified by |
Soby Mathew | 6b8b302 | 2015-06-30 11:00:24 +0100 | [diff] [blame] | 51 | * its mpidr. It performs the generic, architectural, platform setup and state |
| 52 | * management to power on the target cpu e.g. it will ensure that |
| 53 | * enough information is stashed for it to resume execution in the non-secure |
| 54 | * security state. |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 55 | * |
Soby Mathew | 3a9e8bf | 2015-05-05 16:33:16 +0100 | [diff] [blame] | 56 | * The state of all the relevant power domains are changed after calling the |
Soby Mathew | 6b8b302 | 2015-06-30 11:00:24 +0100 | [diff] [blame] | 57 | * platform handler as it can return error. |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 58 | ******************************************************************************/ |
Soby Mathew | 011ca18 | 2015-07-29 17:05:03 +0100 | [diff] [blame] | 59 | int psci_cpu_on_start(u_register_t target_cpu, |
Antonio Nino Diaz | 56a0e8e | 2018-07-16 23:19:25 +0100 | [diff] [blame] | 60 | const entry_point_info_t *ep) |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 61 | { |
| 62 | int rc; |
Soby Mathew | ca37050 | 2016-01-26 11:47:53 +0000 | [diff] [blame] | 63 | aff_info_state_t target_aff_state; |
Deepika Bhavnani | 4287c0c | 2019-12-13 10:23:18 -0600 | [diff] [blame] | 64 | int ret = plat_core_pos_by_mpidr(target_cpu); |
Olivier Deprez | 764b1ad | 2023-04-11 10:00:21 +0200 | [diff] [blame] | 65 | unsigned int target_idx; |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 66 | |
Sandrine Bailleux | 6181acb | 2016-04-22 13:00:19 +0100 | [diff] [blame] | 67 | /* Calling function must supply valid input arguments */ |
Sandrine Bailleux | 6181acb | 2016-04-22 13:00:19 +0100 | [diff] [blame] | 68 | assert(ep != NULL); |
| 69 | |
Olivier Deprez | 764b1ad | 2023-04-11 10:00:21 +0200 | [diff] [blame] | 70 | if ((ret < 0) || (ret >= (int)PLATFORM_CORE_COUNT)) { |
| 71 | ERROR("Unexpected core index.\n"); |
| 72 | panic(); |
| 73 | } |
| 74 | |
| 75 | target_idx = (unsigned int)ret; |
Deepika Bhavnani | 4287c0c | 2019-12-13 10:23:18 -0600 | [diff] [blame] | 76 | |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 77 | /* |
| 78 | * This function must only be called on platforms where the |
| 79 | * CPU_ON platform hooks have been implemented. |
| 80 | */ |
Antonio Nino Diaz | 56a0e8e | 2018-07-16 23:19:25 +0100 | [diff] [blame] | 81 | assert((psci_plat_pm_ops->pwr_domain_on != NULL) && |
| 82 | (psci_plat_pm_ops->pwr_domain_on_finish != NULL)); |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 83 | |
Soby Mathew | 9d754f6 | 2015-04-08 17:42:06 +0100 | [diff] [blame] | 84 | /* Protect against multiple CPUs trying to turn ON the same target CPU */ |
| 85 | psci_spin_lock_cpu(target_idx); |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 86 | |
| 87 | /* |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 88 | * Generic management: Ensure that the cpu is off to be |
| 89 | * turned on. |
David Cunado | 06adba2 | 2017-07-19 12:14:07 +0100 | [diff] [blame] | 90 | * Perform cache maintanence ahead of reading the target CPU state to |
| 91 | * ensure that the data is not stale. |
| 92 | * There is a theoretical edge case where the cache may contain stale |
| 93 | * data for the target CPU data - this can occur under the following |
| 94 | * conditions: |
| 95 | * - the target CPU is in another cluster from the current |
| 96 | * - the target CPU was the last CPU to shutdown on its cluster |
| 97 | * - the cluster was removed from coherency as part of the CPU shutdown |
| 98 | * |
| 99 | * In this case the cache maintenace that was performed as part of the |
| 100 | * target CPUs shutdown was not seen by the current CPU's cluster. And |
| 101 | * so the cache may contain stale data for the target CPU. |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 102 | */ |
Deepika Bhavnani | 4287c0c | 2019-12-13 10:23:18 -0600 | [diff] [blame] | 103 | flush_cpu_data_by_index(target_idx, |
Antonio Nino Diaz | 56a0e8e | 2018-07-16 23:19:25 +0100 | [diff] [blame] | 104 | psci_svc_cpu_data.aff_info_state); |
Soby Mathew | 85dbf5a | 2015-04-07 12:16:56 +0100 | [diff] [blame] | 105 | rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx)); |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 106 | if (rc != PSCI_E_SUCCESS) |
| 107 | goto exit; |
| 108 | |
| 109 | /* |
| 110 | * Call the cpu on handler registered by the Secure Payload Dispatcher |
| 111 | * to let it do any bookeeping. If the handler encounters an error, it's |
| 112 | * expected to assert within |
| 113 | */ |
Antonio Nino Diaz | 56a0e8e | 2018-07-16 23:19:25 +0100 | [diff] [blame] | 114 | if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on != NULL)) |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 115 | psci_spd_pm->svc_on(target_cpu); |
| 116 | |
| 117 | /* |
Soby Mathew | 85dbf5a | 2015-04-07 12:16:56 +0100 | [diff] [blame] | 118 | * Set the Affinity info state of the target cpu to ON_PENDING. |
Soby Mathew | ca37050 | 2016-01-26 11:47:53 +0000 | [diff] [blame] | 119 | * Flush aff_info_state as it will be accessed with caches |
| 120 | * turned OFF. |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 121 | */ |
Soby Mathew | 85dbf5a | 2015-04-07 12:16:56 +0100 | [diff] [blame] | 122 | psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING); |
Deepika Bhavnani | 4287c0c | 2019-12-13 10:23:18 -0600 | [diff] [blame] | 123 | flush_cpu_data_by_index(target_idx, |
Antonio Nino Diaz | 56a0e8e | 2018-07-16 23:19:25 +0100 | [diff] [blame] | 124 | psci_svc_cpu_data.aff_info_state); |
Soby Mathew | ca37050 | 2016-01-26 11:47:53 +0000 | [diff] [blame] | 125 | |
| 126 | /* |
| 127 | * The cache line invalidation by the target CPU after setting the |
| 128 | * state to OFF (see psci_do_cpu_off()), could cause the update to |
| 129 | * aff_info_state to be invalidated. Retry the update if the target |
| 130 | * CPU aff_info_state is not ON_PENDING. |
| 131 | */ |
| 132 | target_aff_state = psci_get_aff_info_state_by_idx(target_idx); |
| 133 | if (target_aff_state != AFF_STATE_ON_PENDING) { |
| 134 | assert(target_aff_state == AFF_STATE_OFF); |
| 135 | psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING); |
Deepika Bhavnani | 4287c0c | 2019-12-13 10:23:18 -0600 | [diff] [blame] | 136 | flush_cpu_data_by_index(target_idx, |
Antonio Nino Diaz | 56a0e8e | 2018-07-16 23:19:25 +0100 | [diff] [blame] | 137 | psci_svc_cpu_data.aff_info_state); |
Soby Mathew | ca37050 | 2016-01-26 11:47:53 +0000 | [diff] [blame] | 138 | |
Antonio Nino Diaz | 56a0e8e | 2018-07-16 23:19:25 +0100 | [diff] [blame] | 139 | assert(psci_get_aff_info_state_by_idx(target_idx) == |
| 140 | AFF_STATE_ON_PENDING); |
Soby Mathew | ca37050 | 2016-01-26 11:47:53 +0000 | [diff] [blame] | 141 | } |
Soby Mathew | 6b8b302 | 2015-06-30 11:00:24 +0100 | [diff] [blame] | 142 | |
| 143 | /* |
| 144 | * Perform generic, architecture and platform specific handling. |
| 145 | */ |
Soby Mathew | 6b8b302 | 2015-06-30 11:00:24 +0100 | [diff] [blame] | 146 | /* |
| 147 | * Plat. management: Give the platform the current state |
| 148 | * of the target cpu to allow it to perform the necessary |
| 149 | * steps to power on. |
| 150 | */ |
Soby Mathew | 011ca18 | 2015-07-29 17:05:03 +0100 | [diff] [blame] | 151 | rc = psci_plat_pm_ops->pwr_domain_on(target_cpu); |
Antonio Nino Diaz | 56a0e8e | 2018-07-16 23:19:25 +0100 | [diff] [blame] | 152 | assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL)); |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 153 | |
| 154 | if (rc == PSCI_E_SUCCESS) |
| 155 | /* Store the re-entry information for the non-secure world. */ |
Deepika Bhavnani | 4287c0c | 2019-12-13 10:23:18 -0600 | [diff] [blame] | 156 | cm_init_context_by_index(target_idx, ep); |
Soby Mathew | ca37050 | 2016-01-26 11:47:53 +0000 | [diff] [blame] | 157 | else { |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 158 | /* Restore the state on error. */ |
Soby Mathew | 85dbf5a | 2015-04-07 12:16:56 +0100 | [diff] [blame] | 159 | psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF); |
Deepika Bhavnani | 4287c0c | 2019-12-13 10:23:18 -0600 | [diff] [blame] | 160 | flush_cpu_data_by_index(target_idx, |
Antonio Nino Diaz | 56a0e8e | 2018-07-16 23:19:25 +0100 | [diff] [blame] | 161 | psci_svc_cpu_data.aff_info_state); |
Soby Mathew | ca37050 | 2016-01-26 11:47:53 +0000 | [diff] [blame] | 162 | } |
Soby Mathew | b0082d2 | 2015-04-09 13:40:55 +0100 | [diff] [blame] | 163 | |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 164 | exit: |
Soby Mathew | 9d754f6 | 2015-04-08 17:42:06 +0100 | [diff] [blame] | 165 | psci_spin_unlock_cpu(target_idx); |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 166 | return rc; |
| 167 | } |
| 168 | |
| 169 | /******************************************************************************* |
Soby Mathew | 3a9e8bf | 2015-05-05 16:33:16 +0100 | [diff] [blame] | 170 | * The following function finish an earlier power on request. They |
Soby Mathew | 85dbf5a | 2015-04-07 12:16:56 +0100 | [diff] [blame] | 171 | * are called by the common finisher routine in psci_common.c. The `state_info` |
| 172 | * is the psci_power_state from which this CPU has woken up from. |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 173 | ******************************************************************************/ |
Deepika Bhavnani | 4287c0c | 2019-12-13 10:23:18 -0600 | [diff] [blame] | 174 | void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info) |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 175 | { |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 176 | /* |
| 177 | * Plat. management: Perform the platform specific actions |
| 178 | * for this cpu e.g. enabling the gic or zeroing the mailbox |
| 179 | * register. The actual state of this cpu has already been |
| 180 | * changed. |
| 181 | */ |
Soby Mathew | 85dbf5a | 2015-04-07 12:16:56 +0100 | [diff] [blame] | 182 | psci_plat_pm_ops->pwr_domain_on_finish(state_info); |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 183 | |
Soby Mathew | 043fe9c | 2017-04-10 22:35:42 +0100 | [diff] [blame] | 184 | #if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 185 | /* |
| 186 | * Arch. management: Enable data cache and manage stack memory |
| 187 | */ |
| 188 | psci_do_pwrup_cache_maintenance(); |
Jeenu Viswambharan | 346bfd8 | 2017-01-05 11:01:02 +0000 | [diff] [blame] | 189 | #endif |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 190 | |
| 191 | /* |
Madhukar Pappireddy | 33bd514 | 2019-08-12 18:31:33 -0500 | [diff] [blame] | 192 | * Plat. management: Perform any platform specific actions which |
| 193 | * can only be done with the cpu and the cluster guaranteed to |
| 194 | * be coherent. |
| 195 | */ |
| 196 | if (psci_plat_pm_ops->pwr_domain_on_finish_late != NULL) |
| 197 | psci_plat_pm_ops->pwr_domain_on_finish_late(state_info); |
| 198 | |
| 199 | /* |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 200 | * All the platform specific actions for turning this cpu |
| 201 | * on have completed. Perform enough arch.initialization |
| 202 | * to run in the non-secure address space. |
| 203 | */ |
Soby Mathew | d019487 | 2016-04-29 19:01:30 +0100 | [diff] [blame] | 204 | psci_arch_setup(); |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 205 | |
| 206 | /* |
Soby Mathew | 9d754f6 | 2015-04-08 17:42:06 +0100 | [diff] [blame] | 207 | * Lock the CPU spin lock to make sure that the context initialization |
| 208 | * is done. Since the lock is only used in this function to create |
| 209 | * a synchronization point with cpu_on_start(), it can be released |
| 210 | * immediately. |
| 211 | */ |
| 212 | psci_spin_lock_cpu(cpu_idx); |
| 213 | psci_spin_unlock_cpu(cpu_idx); |
| 214 | |
Soby Mathew | 85dbf5a | 2015-04-07 12:16:56 +0100 | [diff] [blame] | 215 | /* Ensure we have been explicitly woken up by another cpu */ |
| 216 | assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING); |
| 217 | |
Soby Mathew | 9d754f6 | 2015-04-08 17:42:06 +0100 | [diff] [blame] | 218 | /* |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 219 | * Call the cpu on finish handler registered by the Secure Payload |
| 220 | * Dispatcher to let it do any bookeeping. If the handler encounters an |
| 221 | * error, it's expected to assert within |
| 222 | */ |
Antonio Nino Diaz | 56a0e8e | 2018-07-16 23:19:25 +0100 | [diff] [blame] | 223 | if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on_finish != NULL)) |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 224 | psci_spd_pm->svc_on_finish(0); |
| 225 | |
Jeenu Viswambharan | 55e56a9 | 2017-09-22 08:32:10 +0100 | [diff] [blame] | 226 | PUBLISH_EVENT(psci_cpu_on_finish); |
| 227 | |
Soby Mathew | 9d754f6 | 2015-04-08 17:42:06 +0100 | [diff] [blame] | 228 | /* Populate the mpidr field within the cpu node array */ |
| 229 | /* This needs to be done only once */ |
| 230 | psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK; |
| 231 | |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 232 | /* |
| 233 | * Generic management: Now we just need to retrieve the |
| 234 | * information that we had stashed away during the cpu_on |
| 235 | * call to set this cpu on its way. |
| 236 | */ |
Zelalem Aweke | f92c0cb | 2022-01-31 16:59:42 -0600 | [diff] [blame] | 237 | cm_prepare_el3_exit_ns(); |
Soby Mathew | 991d42c | 2015-06-29 16:30:12 +0100 | [diff] [blame] | 238 | } |