/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <psci.h>
#include <xlat_tables.h>

	.globl	psci_aff_on_finish_entry
	.globl	psci_aff_suspend_finish_entry
	.globl	psci_power_down_wfi

	/* -----------------------------------------------------
	 * This cpu has been physically powered up. Depending
	 * on whether it was resumed from suspend or simply
	 * turned on, pick the matching set of finisher
	 * handlers and call the common power on finisher
	 * with them.
	 * -----------------------------------------------------
	 */
func psci_aff_on_finish_entry
	adr	x23, psci_afflvl_on_finishers
	b	psci_aff_common_finish_entry

psci_aff_suspend_finish_entry:
	adr	x23, psci_afflvl_suspend_finishers

psci_aff_common_finish_entry:
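	/* ---------------------------------------------
	 * x23 holds the table of finisher handlers
	 * chosen above; being callee-saved (AAPCS64) it
	 * survives the function calls below until it is
	 * handed over in x2.
	 * ---------------------------------------------
	 */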
#if !RESET_TO_BL31
	/* ---------------------------------------------
	 * Perform any processor specific actions which
	 * undo or are in addition to the actions
	 * performed by the reset handler in the BootROM
	 * (BL1), e.g. cache and TLB invalidations,
	 * errata workarounds etc.
	 * ---------------------------------------------
	 */
	bl	reset_handler

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks.
	 * It can be assumed that BL3-1 entrypoint code
	 * will do this when RESET_TO_BL31 is set. The
	 * same assumption cannot be made when another
	 * boot loader executes before BL3-1 in the warm
	 * boot path e.g. BL1.
	 * ---------------------------------------------
	 */
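	/* ---------------------------------------------
	 * SCTLR_EL3 bits set below: I enables the
	 * instruction cache, A enables data access
	 * alignment checking, SA enables SP alignment
	 * checking.
	 * ---------------------------------------------
	 */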
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb
#endif

	/* ---------------------------------------------
	 * Initialise the per-cpu data pointer cache for
	 * this CPU.
	 * ---------------------------------------------
	 */
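	/* ---------------------------------------------
	 * Note: init_cpu_data_ptr looks up this CPU's
	 * cpu_data structure and caches its address
	 * (kept in TPIDR_EL3) so later code can reach
	 * it without recomputing it from the MPIDR.
	 * ---------------------------------------------
	 */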
	bl	init_cpu_data_ptr

	/* ---------------------------------------------
	 * Set the exception vectors
	 * ---------------------------------------------
	 */
	adr	x0, runtime_exceptions
	msr	vbar_el3, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been set up.
	 * ---------------------------------------------
	 */
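	/* ---------------------------------------------
	 * DAIF_ABT_BIT clears the PSTATE.A mask, i.e.
	 * it unmasks SError (asynchronous abort)
	 * exceptions at EL3.
	 * ---------------------------------------------
	 */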
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------
	 */
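	/* ---------------------------------------------
	 * SPSel = 0 selects SP_EL0 as the current stack
	 * pointer at EL3, so the stack installed below
	 * becomes the C runtime stack.
	 * ---------------------------------------------
	 */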
	msr	spsel, #0

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
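	/* --------------------------------------------
	 * platform_set_stack takes the MPIDR in x0 and
	 * switches SP to the per-cpu stack reserved for
	 * this CPU by the platform port.
	 * --------------------------------------------
	 */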
	mrs	x0, mpidr_el1
	bl	platform_set_stack

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled. So
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but D$ is disabled.
	 * So used stack memory is guaranteed to be
	 * visible immediately after the MMU is enabled.
	 * Enabling the DCache at the same time as the
	 * MMU can lead to speculatively fetched and
	 * possibly stale stack memory being read from
	 * other caches. This can lead to coherency
	 * issues.
	 * --------------------------------------------
	 */
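	/* --------------------------------------------
	 * x0 carries a flags argument for the platform
	 * MMU setup code; DISABLE_DCACHE is expected to
	 * request that the data cache be left disabled
	 * (SCTLR_EL3.C stays clear) while the MMU is
	 * turned on.
	 * --------------------------------------------
	 */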
	mov	x0, #DISABLE_DCACHE
	bl	bl31_plat_enable_mmu

	/* ---------------------------------------------
	 * Call the finishers starting from affinity
	 * level 0.
	 * ---------------------------------------------
	 */
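	/* ---------------------------------------------
	 * Argument marshalling for the AAPCS64 call
	 * below: x0 = lowest affinity level to finish
	 * (MPIDR_AFFLVL0), x1 = target level returned
	 * by get_power_on_target_afflvl, x2 = finisher
	 * table selected in x23 on entry.
	 * ---------------------------------------------
	 */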
	bl	get_power_on_target_afflvl
	mov	x2, x23
	mov	x1, x0
	mov	x0, #MPIDR_AFFLVL0
	bl	psci_afflvl_power_on_finish

	b	el3_exit
endfunc psci_aff_on_finish_entry

	/* --------------------------------------------
	 * This function is called to indicate to the
	 * power controller that it is safe to power
	 * down this cpu. It should not exit the wfi;
	 * the cpu will be released from reset on the
	 * next power up. 'wfi_spill' is used to catch
	 * erroneous exits from wfi.
	 * --------------------------------------------
	 */
func psci_power_down_wfi
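	/* --------------------------------------------
	 * The dsb below ensures that all outstanding
	 * memory accesses (e.g. any state published by
	 * the caller) complete before the cpu enters
	 * wfi and may be powered down.
	 * --------------------------------------------
	 */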
	dsb	sy		// ensure write buffer empty
	wfi
wfi_spill:
	b	wfi_spill
endfunc psci_power_down_wfi
