/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <pubsub_events.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(aff_info_state_t aff_state)
{
	if (aff_state == AFF_STATE_ON)
		return PSCI_E_ALREADY_ON;

	if (aff_state == AFF_STATE_ON_PENDING)
		return PSCI_E_ON_PENDING;

	assert(aff_state == AFF_STATE_OFF);
	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It performs the generic, architectural and platform setup and
 * state management needed to power on the target cpu, e.g. it ensures that
 * enough information is stashed for it to resume execution in the non-secure
 * security state.
 *
 * The state of all the relevant power domains is changed only after calling
 * the platform handler, as the handler can return an error.
 ******************************************************************************/
int psci_cpu_on_start(u_register_t target_cpu,
		      entry_point_info_t *ep)
{
	int rc;
	unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
	aff_info_state_t target_aff_state;

	/* Calling function must supply valid input arguments */
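	/*
	 * Note: plat_core_pos_by_mpidr() returns a negative value if the MPIDR
	 * does not map to a core on this platform, hence the signed check
	 * below.
	 */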
	assert((int) target_idx >= 0);
	assert(ep != NULL);

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_on &&
	       psci_plat_pm_ops->pwr_domain_on_finish);

	/* Protect against multiple CPUs trying to turn ON the same target CPU */
	psci_spin_lock_cpu(target_idx);

	/*
	 * Generic management: Ensure that the cpu is off before it can be
	 * turned on.
	 * Perform cache maintenance ahead of reading the target CPU state to
	 * ensure that the data is not stale.
	 * There is a theoretical edge case where the cache may contain stale
	 * data for the target CPU - this can occur under the following
	 * conditions:
	 * - the target CPU is in a different cluster from the current one
	 * - the target CPU was the last CPU to shut down on its cluster
	 * - the cluster was removed from coherency as part of the CPU shutdown
	 *
	 * In this case the cache maintenance that was performed as part of the
	 * target CPU's shutdown was not seen by the current CPU's cluster, and
	 * so the cache may contain stale data for the target CPU.
	 */
	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
	if (rc != PSCI_E_SUCCESS)
		goto exit;

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on)
		psci_spd_pm->svc_on(target_cpu);

	/*
	 * Set the Affinity info state of the target cpu to ON_PENDING.
	 * Flush aff_info_state as it will be accessed with caches
	 * turned OFF.
	 */
	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);

	/*
	 * The cache line invalidation by the target CPU after setting the
	 * state to OFF (see psci_do_cpu_off()) could cause the update to
	 * aff_info_state to be invalidated. Retry the update if the target
	 * CPU aff_info_state is not ON_PENDING.
	 */
	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
	if (target_aff_state != AFF_STATE_ON_PENDING) {
		assert(target_aff_state == AFF_STATE_OFF);
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);

		assert(psci_get_aff_info_state_by_idx(target_idx) == AFF_STATE_ON_PENDING);
	}

	/*
	 * Perform generic, architecture and platform specific handling.
	 */
	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

	if (rc == PSCI_E_SUCCESS)
		/* Store the re-entry information for the non-secure world. */
		cm_init_context_by_index(target_idx, ep);
	else {
		/* Restore the state on error. */
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
	}

exit:
	psci_spin_unlock_cpu(target_idx);
	return rc;
}

/*******************************************************************************
 * The following function finishes an earlier power on request. It is called
 * by the common finisher routine in psci_common.c. `state_info` is the
 * psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_on_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info)
{
	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	psci_plat_pm_ops->pwr_domain_on_finish(state_info);

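	/*
	 * Note: when the platform provides hardware-assisted coherency
	 * (HW_ASSISTED_COHERENCY) or enables the data cache early in the warm
	 * boot path (WARMBOOT_ENABLE_DCACHE_EARLY), the explicit power-up
	 * cache and stack maintenance below is not required and is compiled
	 * out.
	 */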
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Arch. management: Enable data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();
#endif

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch. initialization
	 * to run in the non-secure address space.
	 */
	psci_arch_setup();

	/*
	 * Lock the CPU spin lock to make sure that the context initialization
	 * is done. Since the lock is only used in this function to create
	 * a synchronization point with cpu_on_start(), it can be released
	 * immediately.
	 */
	psci_spin_lock_cpu(cpu_idx);
	psci_spin_unlock_cpu(cpu_idx);

	/* Ensure we have been explicitly woken up by another cpu */
	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
		psci_spd_pm->svc_on_finish(0);

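	/*
	 * Notify any subscribers registered against the psci_cpu_on_finish
	 * event through the pubsub framework (see pubsub_events.h).
	 */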
	PUBLISH_EVENT(psci_cpu_on_finish);

	/*
	 * Populate the mpidr field within the cpu node array.
	 * This needs to be done only once.
	 */
	psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}