/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>

        .globl  workaround_bpiall_vbar0_runtime_exceptions

#define EMIT_BPIALL             0xee070fd5
#define EMIT_MOV_R0_IMM(v)      0xe3a0000##v
#define EMIT_SMC                0xe1600070
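
/*
 * The EMIT_* values above are raw AArch32 instruction encodings, planted
 * as data in the stubs below and executed at S-EL1. Decoded, the SMC stub
 * corresponds to the following AArch32 sequence:
 *
 *     mcr p15, 0, r0, c7, c5, 6   @ BPIALL: invalidate all branch predictors
 *     mov r0, #1                  @ flag the original exception type
 *     smc #0                      @ trap back to EL3
 */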

        .macro  enter_workaround _stub_name
        /* Save GP regs */
        stp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
        stp     x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
        stp     x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
        stp     x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
        stp     x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
        stp     x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
        stp     x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
        stp     x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
        stp     x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
        stp     x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
        stp     x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
        stp     x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
        stp     x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
        stp     x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
        stp     x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]

        adr     x4, \_stub_name
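
        /*
         * x4 now holds the address of the 32-byte stub context defined
         * at the matching aarch32_stub_ctx_* label below, laid out as
         * four quads:
         *   [#0]  SPSR_EL3 value   [#8]  VBAR_EL3 value
         *   [#16] SCTLR_EL1 value  [#24] ELR_EL3 value (stub address)
         */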

        /*
         * Load SPSR_EL3 and VBAR_EL3. SPSR_EL3 is set up to have
         * all interrupts masked in preparation for running the workaround
         * stub in S-EL1. VBAR_EL3 points to the vector table that
         * will handle the SMC back from the workaround stub.
         */
        ldp     x0, x1, [x4, #0]

        /*
         * Load SCTLR_EL1 and ELR_EL3. SCTLR_EL1 is configured to disable
         * the MMU in S-EL1. ELR_EL3 points to the appropriate stub in S-EL1.
         */
        ldp     x2, x3, [x4, #16]

        mrs     x4, scr_el3
        mrs     x5, spsr_el3
        mrs     x6, elr_el3
        mrs     x7, sctlr_el1
        mrs     x8, esr_el3

        /* Preserve system registers in the workaround context */
        stp     x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
        stp     x6, x7, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
        stp     x8, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]

        /*
         * Setting SCR_EL3 to all zeroes gives the configuration the stub
         * needs: NS=0 (enter the Secure world), RW=0 (S-EL1 executes in
         * AArch32) and SMD=0 (SMC instructions are enabled).
         */
        msr     scr_el3, xzr

        /*
         * Reload system registers with the crafted values
         * in preparation for entry in S-EL1.
         */
        msr     spsr_el3, x0
        msr     vbar_el3, x1
        msr     sctlr_el1, x2
        msr     elr_el3, x3

        eret
        .endm
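
/*
 * To summarize the round trip for each lower-EL exception: enter_workaround
 * saves the GP registers and EL3 state, then ERETs into the matching AArch32
 * stub at S-EL1; the stub executes BPIALL, loads an exception-type flag into
 * r0 and issues an SMC; the SMC is taken through vbar1 below, which restores
 * the saved EL3 state and finally branches to the regular runtime handler
 * for the original exception.
 */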

        /* ---------------------------------------------------------------------
         * This vector table is used at runtime to enter the workaround at
         * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions. If the workaround
         * is not enabled, the existing runtime exception vector table is used.
         * ---------------------------------------------------------------------
         */
vector_base workaround_bpiall_vbar0_runtime_exceptions

        /* ---------------------------------------------------------------------
         * Current EL with SP_EL0 : 0x0 - 0x200
         * ---------------------------------------------------------------------
         */
vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
        b       sync_exception_sp_el0
        /*
         * Since each vector table entry is 128 bytes, we can store the
         * stub context in the unused space to minimize memory footprint.
         */
aarch32_stub_smc:
        .word   EMIT_BPIALL
        .word   EMIT_MOV_R0_IMM(1)
        .word   EMIT_SMC
aarch32_stub_ctx_smc:
        /* Mask all interrupts and set AArch32 Supervisor mode */
        .quad   (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
                 SPSR_M_AARCH32 << SPSR_M_SHIFT | \
                 MODE32_svc << MODE32_SHIFT)

        /*
         * VBAR_EL3 points to vbar1, the vector table that is
         * used while the workaround is executing.
         */
        .quad   workaround_bpiall_vbar1_runtime_exceptions

        /* Set up SCTLR_EL1 with the MMU off and the I-cache on */
        .quad   SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT

        /* ELR_EL3 is set up to point to the sync exception stub in AArch32 */
        .quad   aarch32_stub_smc
        check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
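
        /*
         * Note: the branch (4 bytes), the three-word stub (12 bytes) and
         * the four-quad context (32 bytes) above fit comfortably in the
         * 128-byte vector entry; check_vector_size enforces the limit at
         * assembly time.
         */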

vector_entry workaround_bpiall_vbar0_irq_sp_el0
        b       irq_sp_el0
aarch32_stub_irq:
        .word   EMIT_BPIALL
        .word   EMIT_MOV_R0_IMM(2)
        .word   EMIT_SMC
aarch32_stub_ctx_irq:
        .quad   (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
                 SPSR_M_AARCH32 << SPSR_M_SHIFT | \
                 MODE32_svc << MODE32_SHIFT)
        .quad   workaround_bpiall_vbar1_runtime_exceptions
        .quad   SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
        .quad   aarch32_stub_irq
        check_vector_size workaround_bpiall_vbar0_irq_sp_el0

vector_entry workaround_bpiall_vbar0_fiq_sp_el0
        b       fiq_sp_el0
aarch32_stub_fiq:
        .word   EMIT_BPIALL
        .word   EMIT_MOV_R0_IMM(4)
        .word   EMIT_SMC
aarch32_stub_ctx_fiq:
        .quad   (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
                 SPSR_M_AARCH32 << SPSR_M_SHIFT | \
                 MODE32_svc << MODE32_SHIFT)
        .quad   workaround_bpiall_vbar1_runtime_exceptions
        .quad   SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
        .quad   aarch32_stub_fiq
        check_vector_size workaround_bpiall_vbar0_fiq_sp_el0

vector_entry workaround_bpiall_vbar0_serror_sp_el0
        b       serror_sp_el0
aarch32_stub_serror:
        .word   EMIT_BPIALL
        .word   EMIT_MOV_R0_IMM(8)
        .word   EMIT_SMC
aarch32_stub_ctx_serror:
        .quad   (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
                 SPSR_M_AARCH32 << SPSR_M_SHIFT | \
                 MODE32_svc << MODE32_SHIFT)
        .quad   workaround_bpiall_vbar1_runtime_exceptions
        .quad   SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
        .quad   aarch32_stub_serror
        check_vector_size workaround_bpiall_vbar0_serror_sp_el0

        /* ---------------------------------------------------------------------
         * Current EL with SP_ELx: 0x200 - 0x400
         * ---------------------------------------------------------------------
         */
vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx
        b       sync_exception_sp_elx
        check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx

vector_entry workaround_bpiall_vbar0_irq_sp_elx
        b       irq_sp_elx
        check_vector_size workaround_bpiall_vbar0_irq_sp_elx

vector_entry workaround_bpiall_vbar0_fiq_sp_elx
        b       fiq_sp_elx
        check_vector_size workaround_bpiall_vbar0_fiq_sp_elx

vector_entry workaround_bpiall_vbar0_serror_sp_elx
        b       serror_sp_elx
        check_vector_size workaround_bpiall_vbar0_serror_sp_elx

        /* ---------------------------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600
         * ---------------------------------------------------------------------
         */
vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
        enter_workaround aarch32_stub_ctx_smc
        check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64

vector_entry workaround_bpiall_vbar0_irq_aarch64
        enter_workaround aarch32_stub_ctx_irq
        check_vector_size workaround_bpiall_vbar0_irq_aarch64

vector_entry workaround_bpiall_vbar0_fiq_aarch64
        enter_workaround aarch32_stub_ctx_fiq
        check_vector_size workaround_bpiall_vbar0_fiq_aarch64

vector_entry workaround_bpiall_vbar0_serror_aarch64
        enter_workaround aarch32_stub_ctx_serror
        check_vector_size workaround_bpiall_vbar0_serror_aarch64

        /* ---------------------------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
         * ---------------------------------------------------------------------
         */
vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
        enter_workaround aarch32_stub_ctx_smc
        check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32

vector_entry workaround_bpiall_vbar0_irq_aarch32
        enter_workaround aarch32_stub_ctx_irq
        check_vector_size workaround_bpiall_vbar0_irq_aarch32

vector_entry workaround_bpiall_vbar0_fiq_aarch32
        enter_workaround aarch32_stub_ctx_fiq
        check_vector_size workaround_bpiall_vbar0_fiq_aarch32

vector_entry workaround_bpiall_vbar0_serror_aarch32
        enter_workaround aarch32_stub_ctx_serror
        check_vector_size workaround_bpiall_vbar0_serror_aarch32
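
        /*
         * Note that the lower-EL AArch64 and AArch32 vectors above share
         * the same stub contexts: the workaround stub always runs in
         * AArch32 Secure SVC mode, whatever the register width of the
         * interrupted world.
         */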

        /* ---------------------------------------------------------------------
         * This vector table is used while the workaround is executing. It
         * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
         * workaround stubs to enter EL3 from S-EL1. It restores the previous
         * EL3 state before proceeding with the normal runtime exception vector.
         * ---------------------------------------------------------------------
         */
vector_base workaround_bpiall_vbar1_runtime_exceptions

        /* ---------------------------------------------------------------------
         * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
         * ---------------------------------------------------------------------
         */
vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0
        b       report_unhandled_exception
        check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0

vector_entry workaround_bpiall_vbar1_irq_sp_el0
        b       report_unhandled_interrupt
        check_vector_size workaround_bpiall_vbar1_irq_sp_el0

vector_entry workaround_bpiall_vbar1_fiq_sp_el0
        b       report_unhandled_interrupt
        check_vector_size workaround_bpiall_vbar1_fiq_sp_el0

vector_entry workaround_bpiall_vbar1_serror_sp_el0
        b       report_unhandled_exception
        check_vector_size workaround_bpiall_vbar1_serror_sp_el0

        /* ---------------------------------------------------------------------
         * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
         * ---------------------------------------------------------------------
         */
vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx
        b       report_unhandled_exception
        check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx

vector_entry workaround_bpiall_vbar1_irq_sp_elx
        b       report_unhandled_interrupt
        check_vector_size workaround_bpiall_vbar1_irq_sp_elx

vector_entry workaround_bpiall_vbar1_fiq_sp_elx
        b       report_unhandled_interrupt
        check_vector_size workaround_bpiall_vbar1_fiq_sp_elx

vector_entry workaround_bpiall_vbar1_serror_sp_elx
        b       report_unhandled_exception
        check_vector_size workaround_bpiall_vbar1_serror_sp_elx

        /* ---------------------------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
         * ---------------------------------------------------------------------
         */
vector_entry workaround_bpiall_vbar1_sync_exception_aarch64
        b       report_unhandled_exception
        check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64

vector_entry workaround_bpiall_vbar1_irq_aarch64
        b       report_unhandled_interrupt
        check_vector_size workaround_bpiall_vbar1_irq_aarch64

vector_entry workaround_bpiall_vbar1_fiq_aarch64
        b       report_unhandled_interrupt
        check_vector_size workaround_bpiall_vbar1_fiq_aarch64

vector_entry workaround_bpiall_vbar1_serror_aarch64
        b       report_unhandled_exception
        check_vector_size workaround_bpiall_vbar1_serror_aarch64

        /* ---------------------------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
         * ---------------------------------------------------------------------
         */
vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
        /* Restore register state from the workaround context */
        ldp     x2, x3, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
        ldp     x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
        ldp     x6, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]

        /* Apply the restored system register state */
        msr     scr_el3, x2
        msr     spsr_el3, x3
        msr     elr_el3, x4
        msr     sctlr_el1, x5
        msr     esr_el3, x6

        /*
         * The workaround is complete, so swap VBAR_EL3 back to point
         * to the workaround entry table (vbar0) in preparation for
         * subsequent Sync/IRQ/FIQ/SError exceptions.
         */
        adr     x2, workaround_bpiall_vbar0_runtime_exceptions
        msr     vbar_el3, x2

        /*
         * Restore all GP regs except x0 and x1. The value in x0
         * indicates the type of the original exception.
         */
        ldp     x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
        ldp     x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
        ldp     x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
        ldp     x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
        ldp     x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
        ldp     x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
        ldp     x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
        ldp     x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
        ldp     x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
        ldp     x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
        ldp     x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
        ldp     x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
        ldp     x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
        ldp     x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]

        /*
         * The stub set x0 to 1/2/4/8 for Sync/IRQ/FIQ/SError respectively,
         * so bits 1-3 select the handler. Each of these handlers will first
         * restore x0 and x1 from the context and then branch to the common
         * implementation for its exception type.
         */
        tbnz    x0, #1, workaround_bpiall_vbar1_irq
        tbnz    x0, #2, workaround_bpiall_vbar1_fiq
        tbnz    x0, #3, workaround_bpiall_vbar1_serror

        /* Fallthrough case for the Sync exception */
        ldp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
        b       sync_exception_aarch64
        check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
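
        /*
         * The IRQ/FIQ/SError tails below sit in the otherwise unused
         * space of their vector entries and are reached only through the
         * tbnz dispatch above; taking one of these vectors directly is
         * unexpected and reported as unhandled.
         */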

vector_entry workaround_bpiall_vbar1_irq_aarch32
        b       report_unhandled_interrupt
workaround_bpiall_vbar1_irq:
        ldp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
        b       irq_aarch64
        check_vector_size workaround_bpiall_vbar1_irq_aarch32

vector_entry workaround_bpiall_vbar1_fiq_aarch32
        b       report_unhandled_interrupt
workaround_bpiall_vbar1_fiq:
        ldp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
        b       fiq_aarch64
        check_vector_size workaround_bpiall_vbar1_fiq_aarch32

vector_entry workaround_bpiall_vbar1_serror_aarch32
        b       report_unhandled_exception
workaround_bpiall_vbar1_serror:
        ldp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
        b       serror_aarch64
        check_vector_size workaround_bpiall_vbar1_serror_aarch32