| /* |
| * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. |
| * |
| * Redistribution and use in source and binary forms, with or without |
| * modification, are permitted provided that the following conditions are met: |
| * |
| * Redistributions of source code must retain the above copyright notice, this |
| * list of conditions and the following disclaimer. |
| * |
| * Redistributions in binary form must reproduce the above copyright notice, |
| * this list of conditions and the following disclaimer in the documentation |
| * and/or other materials provided with the distribution. |
| * |
| * Neither the name of ARM nor the names of its contributors may be used |
| * to endorse or promote products derived from this software without specific |
| * prior written permission. |
| * |
| * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE |
| * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| * POSSIBILITY OF SUCH DAMAGE. |
| */ |
| |
| #include <arch.h> |
| #include <asm_macros.S> |
| #include <el3_common_macros.S> |
| #include <psci.h> |
| #include <xlat_tables.h> |
| |
| .globl psci_aff_on_finish_entry |
| .globl psci_aff_suspend_finish_entry |
| .globl psci_power_down_wfi |
| |
| /* ----------------------------------------------------- |
| * This CPU has been physically powered up. Depending |
| * upon whether it was resumed from suspend or simply |
| * turned on, enter the common power on finish handler |
| * with x23 pointing to the set of affinity level |
| * finishers chosen for the original state. |
| * ----------------------------------------------------- |
| */ |
| func psci_aff_on_finish_entry |
| adr x23, psci_afflvl_on_finishers |
| b psci_aff_common_finish_entry |
| |
| psci_aff_suspend_finish_entry: |
| adr x23, psci_afflvl_suspend_finishers |
| |
| psci_aff_common_finish_entry: |
| /* |
| * On the warm boot path, most of the EL3 initialisations performed by |
| * 'el3_entrypoint_common' must be skipped: |
| * |
| * - Only when the platform bypasses the BL1/BL3-1 entrypoint by |
| * programming the reset address do we need to set the CPU endianness. |
| * In other cases, we assume this has been taken care of by the |
| * entrypoint code. |
| * |
| * - No need to determine the type of boot, we know it is a warm boot. |
| * |
| * - Do not try to distinguish between primary and secondary CPUs, this |
| * notion only exists for a cold boot. |
| * |
| * - No need to initialise the memory or the C runtime environment, |
| * it has been done once and for all on the cold boot path. |
| */ |
| el3_entrypoint_common \ |
| _set_endian=PROGRAMMABLE_RESET_ADDRESS \ |
| _warm_boot_mailbox=0 \ |
| _secondary_cold_boot=0 \ |
| _init_memory=0 \ |
| _init_c_runtime=0 \ |
| _exception_vectors=runtime_exceptions |
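| |
| /* |
| * Note: the warm boot path relies on el3_entrypoint_common leaving |
| * x23 (loaded above with the finisher table) intact; it is consumed |
| * below when setting up the call to psci_afflvl_power_on_finish. |
| */ |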
| |
| /* -------------------------------------------- |
| * Enable the MMU with the DCache disabled. It |
| * is safe to use stacks allocated in normal |
| * memory as a result. All memory accesses are |
| * marked nGnRnE when the MMU is disabled. So |
| * all the stack writes will make it to memory. |
| * All memory accesses are marked Non-cacheable |
| * when the MMU is enabled but D$ is disabled. |
| * So used stack memory is guaranteed to be |
| * visible immediately after the MMU is enabled. |
| * Enabling the DCache at the same time as the |
| * MMU can lead to speculatively fetched and |
| * possibly stale stack memory being read from |
| * other caches. This can lead to coherency |
| * issues. |
| * -------------------------------------------- |
| */ |
| mov x0, #DISABLE_DCACHE |
| bl bl31_plat_enable_mmu |
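| |
| /* |
| * bl31_plat_enable_mmu is the platform-provided hook that enables |
| * the MMU for BL3-1; per the AAPCS64 it receives the flags value in |
| * x0, and DISABLE_DCACHE asks for the data cache to stay off for |
| * the reasons explained above. |
| */ |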
| |
| /* --------------------------------------------- |
| * Call the finishers starting from affinity |
| * level 0. |
| * --------------------------------------------- |
| */ |
| bl get_power_on_target_afflvl |
| mov x2, x23 |
| mov x1, x0 |
| mov x0, #MPIDR_AFFLVL0 |
| bl psci_afflvl_power_on_finish |
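| |
| /* |
| * Per the AAPCS64, x0-x2 above form the arguments to |
| * psci_afflvl_power_on_finish: the level to start finishing from |
| * (MPIDR_AFFLVL0), the target level returned by |
| * get_power_on_target_afflvl and the finisher table selected at |
| * entry. |
| */ |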
| |
| b el3_exit |
| endfunc psci_aff_on_finish_entry |
| |
| /* -------------------------------------------- |
| * This function is called to indicate to the |
| * power controller that it is safe to power |
| * down this CPU. The core is not expected to |
| * exit the wfi; it will be released from reset |
| * on the next power up. The 'wfi_spill' loop |
| * catches erroneous exits from wfi. |
| * -------------------------------------------- |
| */ |
| func psci_power_down_wfi |
| dsb sy // ensure write buffer empty |
| wfi |
| wfi_spill: |
| b wfi_spill |
| endfunc psci_power_down_wfi |
| |