/*
 * Copyright (C) 2013,2014 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/psci.h>

	.pushsection ._secure.text, "ax"

	.arch_extension sec

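@ Secure monitor exception vectors used for PSCI: SMC calls are dispatched
@ to _smc_psci, FIQs to psci_fiq_enter, and every other exception falls
@ through to default_psci_vector. The platform's PSCI setup code is assumed
@ to point MVBAR at this table (that glue lives outside this file).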
	.align	5
	.globl	_psci_vectors
_psci_vectors:
	b	default_psci_vector	@ reset
	b	default_psci_vector	@ undef
	b	_smc_psci		@ smc
	b	default_psci_vector	@ pabort
	b	default_psci_vector	@ dabort
	b	default_psci_vector	@ hyp
	b	default_psci_vector	@ irq
	b	psci_fiq_enter		@ fiq

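@ Weak default handlers: "movs pc, lr" performs an exception return from
@ monitor mode, handing control straight back to the interrupted (normally
@ non-secure) context. Platforms may override these with strong definitions,
@ e.g. to handle secure FIQs.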
ENTRY(psci_fiq_enter)
	movs	pc, lr
ENDPROC(psci_fiq_enter)
.weak psci_fiq_enter

ENTRY(default_psci_vector)
	movs	pc, lr
ENDPROC(default_psci_vector)
.weak default_psci_vector

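@ Weak default implementations of the PSCI functions. They simply report
@ ARM_PSCI_RET_NI ("Not Implemented"); platform code provides strong
@ overrides for the calls it actually supports.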
ENTRY(psci_cpu_suspend)
ENTRY(psci_cpu_off)
ENTRY(psci_cpu_on)
ENTRY(psci_migrate)
	mov	r0, #ARM_PSCI_RET_NI	@ Return -1 (Not Implemented)
	mov	pc, lr
ENDPROC(psci_migrate)
ENDPROC(psci_cpu_on)
ENDPROC(psci_cpu_off)
ENDPROC(psci_cpu_suspend)
.weak psci_cpu_suspend
.weak psci_cpu_off
.weak psci_cpu_on
.weak psci_migrate

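@ Dispatch table used by _smc_psci: pairs of (PSCI function ID, handler
@ address), terminated by a zero function ID.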
_psci_table:
	.word	ARM_PSCI_FN_CPU_SUSPEND
	.word	psci_cpu_suspend
	.word	ARM_PSCI_FN_CPU_OFF
	.word	psci_cpu_off
	.word	ARM_PSCI_FN_CPU_ON
	.word	psci_cpu_on
	.word	ARM_PSCI_FN_MIGRATE
	.word	psci_migrate
	.word	0
	.word	0

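@ SMC entry point. The non-secure caller passes the PSCI function ID in r0
@ and its arguments in r1-r3. SCR.NS is cleared so CP15 accesses reach the
@ secure banked registers, the function ID is looked up in _psci_table, and
@ the handler's return value goes back in r0 with SCR restored on exit.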
_smc_psci:
	push	{r4-r7, lr}

	@ Switch to secure: clear SCR.NS, keeping the original SCR in r7
	@ so it can be restored on the way out
	mrc	p15, 0, r7, c1, c1, 0
	bic	r4, r7, #1
	mcr	p15, 0, r4, c1, c1, 0
	isb

	adr	r4, _psci_table
1:	ldr	r5, [r4]			@ Load PSCI function ID
	ldr	r6, [r4, #4]			@ Load target PC
	cmp	r5, #0				@ If we reach the end, bail out
	moveq	r0, #ARM_PSCI_RET_INVAL		@ Return -2 (Invalid)
	beq	2f
	cmp	r0, r5				@ If not matching, try next entry
	addne	r4, r4, #8
	bne	1b

	blx	r6				@ Execute PSCI function

	@ Switch back to non-secure
2:	mcr	p15, 0, r7, c1, c1, 0

	pop	{r4-r7, lr}
	movs	pc, lr				@ Return to the kernel

@ Requires dense and single-cluster CPU ID space
ENTRY(psci_get_cpu_id)
	mrc	p15, 0, r0, c0, c0, 5	/* read MPIDR */
	and	r0, r0, #0xff		/* return CPU ID in cluster */
	bx	lr
ENDPROC(psci_get_cpu_id)
.weak psci_get_cpu_id

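@ Clean and invalidate the whole D-cache hierarchy by set/way, walking the
@ cache levels reported in CLIDR up to the Level of Coherency. Set/way
@ maintenance only affects the caches of the local CPU, which is what the
@ CPU-off path below relies on.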
/* Imported from Linux kernel */
LENTRY(v7_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
	ands	r3, r0, #0x7000000		@ extract loc from clidr
	mov	r3, r3, lsr #23			@ left align loc bit field
	beq	finished			@ if loc is 0, then no need to clean
	mov	r10, #0				@ start clean at cache level 0
flush_levels:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask off the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
	mrs	r9, cpsr			@ make cssr&csidr read atomic
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	isb					@ isb to sync the new cssr&csidr
	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
	msr	cpsr_c, r9
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	ldr	r4, =0x3ff
	ands	r4, r4, r1, lsr #3		@ extract maximum way number (ways - 1)
	clz	r5, r4				@ find bit position of way size increment
	ldr	r7, =0x7fff
	ands	r7, r7, r1, lsr #13		@ extract maximum set index (sets - 1)
loop1:
	mov	r9, r7				@ create working copy of max index
loop2:
	orr	r11, r10, r4, lsl r5		@ factor way and cache number into r11
	orr	r11, r11, r9, lsl r2		@ factor index number into r11
	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
	subs	r9, r9, #1			@ decrement the index
	bge	loop2
	subs	r4, r4, #1			@ decrement the way
	bge	loop1
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	flush_levels
finished:
	mov	r10, #0				@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	dsb	st
	isb
	bx	lr
ENDPROC(v7_flush_dcache_all)

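@ Take this CPU out of SMP coherency by clearing the ACTLR.SMP bit. This
@ assumes the Cortex-A7/A15-style ACTLR layout where bit 6 is the SMP bit;
@ the symbol is weak so platforms with a different layout can override it.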
ENTRY(psci_disable_smp)
	mrc	p15, 0, r0, c1, c0, 1	@ ACTLR
	bic	r0, r0, #(1 << 6)	@ Clear SMP bit
	mcr	p15, 0, r0, c1, c0, 1	@ ACTLR
	isb
	dsb
	bx	lr
ENDPROC(psci_disable_smp)
.weak psci_disable_smp

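@ Counterpart of psci_disable_smp: set ACTLR.SMP so a freshly powered-on CPU
@ rejoins the coherency domain before its caches are enabled.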
ENTRY(psci_enable_smp)
	mrc	p15, 0, r0, c1, c0, 1	@ ACTLR
	orr	r0, r0, #(1 << 6)	@ Set SMP bit
	mcr	p15, 0, r0, c1, c0, 1	@ ACTLR
	isb
	bx	lr
ENDPROC(psci_enable_smp)
.weak psci_enable_smp

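@ Common teardown before a CPU is powered off: disable the D-cache, flush
@ and invalidate the local caches, drop any exclusive monitor state and
@ leave the SMP coherency domain.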
ENTRY(psci_cpu_off_common)
	push	{lr}

	mrc	p15, 0, r0, c1, c0, 0	@ SCTLR
	bic	r0, r0, #(1 << 2)	@ Clear C bit
	mcr	p15, 0, r0, c1, c0, 0	@ SCTLR
	isb
	dsb

	bl	v7_flush_dcache_all

	clrex				@ Clear the local exclusive monitor

	bl	psci_disable_smp

	pop	{lr}
	bx	lr
ENDPROC(psci_cpu_off_common)

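@ Entry point for a CPU started via CPU_ON: re-enable SMP coherency, set up
@ the non-secure world, then branch to the address that the platform's
@ psci_cpu_on implementation stored in _psci_target_pc (how that word gets
@ written is platform code, not shown here).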
ENTRY(psci_cpu_entry)
	bl	psci_enable_smp

	bl	_nonsec_init

	adr	r0, _psci_target_pc
	ldr	r0, [r0]
	b	_do_nonsec_entry
ENDPROC(psci_cpu_entry)

.globl	_psci_target_pc
_psci_target_pc:
	.word	0

	.popsection