blob: 28a41436cecbfdb3b1d8609b9a28fdbf819956d8 [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dan Handleye83b0ca2014-01-14 18:17:09 +00002 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch.h>
32#include <platform.h>
33#include <psci.h>
34#include <psci_private.h>
Achin Guptac8afc782013-11-25 18:45:02 +000035#include <runtime_svc.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010036#include <asm_macros.S>
37
38 .globl psci_aff_on_finish_entry
39 .globl psci_aff_suspend_finish_entry
40 .globl __psci_cpu_off
41 .globl __psci_cpu_suspend
42
Sandrine Bailleux8d69a032013-11-27 09:38:52 +000043 .section .text, "ax"; .align 3
Achin Gupta4f6ad662013-10-25 09:08:21 +010044
45 /* -----------------------------------------------------
46 * This cpu has been physically powered up. Depending
47 * upon whether it was resumed from suspend or simply
48 * turned on, call the common power on finisher with
49 * the handlers (chosen depending upon original state).
50 * For ease, the finisher is called with coherent
51 * stacks. This allows the cluster/cpu finishers to
52 * enter coherency and enable the mmu without running
53 * into issues. We switch back to normal stacks once
54 * all this is done.
55 * -----------------------------------------------------
56 */
/* -----------------------------------------------------
 * Entry point for a cpu woken up by a CPU_ON request.
 * Selects the "on" finisher table, then joins the
 * common wake-up path below.
 * -----------------------------------------------------
 */
psci_aff_on_finish_entry:
	adr	x23, psci_afflvl_on_finishers
	b	psci_aff_common_finish_entry

	/* -----------------------------------------------------
	 * Entry point for a cpu resuming from CPU_SUSPEND.
	 * Selects the "suspend" finisher table and falls
	 * through into the common path.
	 * -----------------------------------------------------
	 */
psci_aff_suspend_finish_entry:
	adr	x23, psci_afflvl_suspend_finishers

	/* -----------------------------------------------------
	 * Common warm-boot finish path. Register roles from
	 * here until the eret:
	 *   x19 = this cpu's MPIDR (set after read_mpidr)
	 *   x22 = psci_afflvl_power_on_finish (common finisher)
	 *   x23 = per-affinity-level handler table chosen by
	 *         whichever entry point was taken above
	 * -----------------------------------------------------
	 */
psci_aff_common_finish_entry:
	adr	x22, psci_afflvl_power_on_finish

	/* ---------------------------------------------
	 * Exceptions should not occur at this point.
	 * Set VBAR in order to handle and report any
	 * that do occur
	 * ---------------------------------------------
	 */
	adr	x0, early_exceptions
	msr	vbar_el3, x0
	isb

	/* Stash the MPIDR in callee-saved x19 so it survives
	 * the calls below, then move onto this cpu's coherent
	 * stack (safe to use before caches/MMU are set up). */
	bl	read_mpidr
	mov	x19, x0
	bl	platform_set_coherent_stack

	/* ---------------------------------------------
	 * Call the finishers starting from affinity
	 * level 0.
	 * ---------------------------------------------
	 */
	mov	x0, x19
	bl	get_power_on_target_afflvl
	cmp	x0, xzr			/* negative return => no valid target level */
	b.lt	_panic
	mov	x3, x23			/* arg3: handler table for this wake-up cause */
	mov	x2, x0			/* arg2: highest affinity level to finish */
	mov	x0, x19			/* arg0: this cpu's mpidr */
	mov	x1, #MPIDR_AFFLVL0	/* arg1: start from affinity level 0 */
	blr	x22			/* psci_afflvl_power_on_finish(x0..x3) */

	/* --------------------------------------------
	 * Give ourselves a stack allocated in Normal
	 * -IS-WBWA memory
	 * --------------------------------------------
	 */
	mov	x0, x19
	bl	platform_set_stack

	/* ---------------------------------------------
	 * Now that the execution stack has been set
	 * up, enable full runtime exception handling.
	 * Since we're just about to leave this EL with
	 * ERET, we don't need an ISB here
	 * ---------------------------------------------
	 */
	adr	x0, runtime_exceptions
	msr	vbar_el3, x0

	/* --------------------------------------------
	 * Use the size of the general purpose register
	 * context to restore the register state
	 * stashed by earlier code
	 * --------------------------------------------
	 */
	sub	sp, sp, #SIZEOF_GPREGS
	exception_exit restore_regs

	/* --------------------------------------------
	 * Jump back to the non-secure world assuming
	 * that the elr and spsr setup has been done
	 * by the finishers
	 * --------------------------------------------
	 */
	eret

	/* No recovery is possible this early in the warm-boot
	 * path: park the cpu in a tight loop. */
_panic:
	b	_panic
132
133 /* -----------------------------------------------------
134 * The following two stubs give the calling cpu a
135 * coherent stack to allow flushing of caches without
136 * suffering from stack coherency issues
137 * -----------------------------------------------------
138 */
/* -----------------------------------------------------
 * __psci_cpu_off
 * Stub that parks the caller on this cpu's coherent
 * stack (so caches can be flushed without stack
 * coherency issues) before calling the C handler
 * psci_cpu_off(). On PSCI_E_SUCCESS control never
 * returns: the cpu proceeds to final_wfi. On any other
 * return code the original stack and callee-saved regs
 * are restored and the error is returned in x0.
 * NOTE(review): func_prologue/func_epilogue are macros
 * defined elsewhere (asm_macros.S) — frame details come
 * from them; confirm before relying on frame layout.
 * -----------------------------------------------------
 */
__psci_cpu_off:
	func_prologue
	sub	sp, sp, #0x10
	stp	x19, x20, [sp, #0]	/* preserve callee-saved x19/x20 */
	mov	x19, sp			/* x19 = original stack pointer */
	bl	read_mpidr		/* x0 = this cpu's mpidr ...        */
	bl	platform_set_coherent_stack /* ... selects the coherent stack */
	bl	psci_cpu_off
	mov	x1, #PSCI_E_SUCCESS
	cmp	x0, x1
	b.eq	final_wfi		/* success: power down, never return */
	mov	sp, x19			/* failure: back onto the caller's stack */
	ldp	x19, x20, [sp,#0]
	add	sp, sp, #0x10
	func_epilogue
	ret				/* x0 = error code from psci_cpu_off */
155
/* -----------------------------------------------------
 * __psci_cpu_suspend(x0, x1, x2)
 * Same pattern as __psci_cpu_off: save the current sp
 * in x19 and the three arguments in callee-saved
 * x20-x22 so they survive the switch onto the coherent
 * stack, then call the C handler psci_cpu_suspend()
 * with the original arguments. PSCI_E_SUCCESS goes to
 * final_wfi and never returns; any other result
 * restores the original stack/registers and returns
 * the error code in x0.
 * -----------------------------------------------------
 */
__psci_cpu_suspend:
	func_prologue
	sub	sp, sp, #0x20
	stp	x19, x20, [sp, #0]	/* preserve callee-saved x19/x20 */
	stp	x21, x22, [sp, #0x10]	/* preserve callee-saved x21/x22 */
	mov	x19, sp			/* x19 = original stack pointer */
	mov	x20, x0			/* stash args across the stack switch */
	mov	x21, x1
	mov	x22, x2
	bl	read_mpidr		/* x0 = mpidr, consumed below */
	bl	platform_set_coherent_stack
	mov	x0, x20			/* restore the original arguments */
	mov	x1, x21
	mov	x2, x22
	bl	psci_cpu_suspend
	mov	x1, #PSCI_E_SUCCESS
	cmp	x0, x1
	b.eq	final_wfi		/* success: power down, never return */
	mov	sp, x19			/* failure: back onto the caller's stack */
	ldp	x21, x22, [sp,#0x10]
	ldp	x19, x20, [sp,#0]
	add	sp, sp, #0x20
	func_epilogue
	ret				/* x0 = error code from psci_cpu_suspend */
180
/* -----------------------------------------------------
 * Terminal power-down sequence: drain all outstanding
 * memory accesses with a full-system barrier, then
 * wait-for-interrupt. If execution ever falls past the
 * wfi (e.g. a spurious wakeup before the power
 * controller acts — presumably; confirm against the
 * platform power sequence), spin rather than run on.
 * -----------------------------------------------------
 */
final_wfi:
	dsb	sy
	wfi
wfi_spill:
	b	wfi_spill
186