blob: aee4feeea198cec0c4a21827c6930cfc6682f53b [file] [log] [blame]
/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
7#include <arch.h>
8#include <asm_macros.S>
9#include <assert_macros.S>
Varun Wadekare34bd092018-01-10 17:03:22 -080010#include <context.h>
Varun Wadekar28463b92015-07-14 17:11:20 +053011#include <denver.h>
12#include <cpu_macros.S>
13#include <plat_macros.S>
14
Varun Wadekare34bd092018-01-10 17:03:22 -080015 /* -------------------------------------------------
16 * CVE-2017-5715 mitigation
17 *
18 * Flush the indirect branch predictor and RSB on
19 * entry to EL3 by issuing a newly added instruction
20 * for Denver CPUs.
21 *
22 * To achieve this without performing any branch
23 * instruction, a per-cpu vbar is installed which
24 * executes the workaround and then branches off to
25 * the corresponding vector entry in the main vector
26 * table.
27 * -------------------------------------------------
28 */
29 .globl workaround_bpflush_runtime_exceptions
30
31vector_base workaround_bpflush_runtime_exceptions
32
33 .macro apply_workaround
34 stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
35
36 /* -------------------------------------------------
37 * A new write-only system register where a write of
38 * 1 to bit 0 will cause the indirect branch predictor
39 * and RSB to be flushed.
40 *
41 * A write of 0 to bit 0 will be ignored. A write of
42 * 1 to any other bit will cause an MCA.
43 * -------------------------------------------------
44 */
45 mov x0, #1
46 msr s3_0_c15_c0_6, x0
47 isb
48
49 ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
50 .endm
51
52 /* ---------------------------------------------------------------------
53 * Current EL with SP_EL0 : 0x0 - 0x200
54 * ---------------------------------------------------------------------
55 */
56vector_entry workaround_bpflush_sync_exception_sp_el0
57 b sync_exception_sp_el0
58 check_vector_size workaround_bpflush_sync_exception_sp_el0
59
60vector_entry workaround_bpflush_irq_sp_el0
61 b irq_sp_el0
62 check_vector_size workaround_bpflush_irq_sp_el0
63
64vector_entry workaround_bpflush_fiq_sp_el0
65 b fiq_sp_el0
66 check_vector_size workaround_bpflush_fiq_sp_el0
67
68vector_entry workaround_bpflush_serror_sp_el0
69 b serror_sp_el0
70 check_vector_size workaround_bpflush_serror_sp_el0
71
72 /* ---------------------------------------------------------------------
73 * Current EL with SP_ELx: 0x200 - 0x400
74 * ---------------------------------------------------------------------
75 */
76vector_entry workaround_bpflush_sync_exception_sp_elx
77 b sync_exception_sp_elx
78 check_vector_size workaround_bpflush_sync_exception_sp_elx
79
80vector_entry workaround_bpflush_irq_sp_elx
81 b irq_sp_elx
82 check_vector_size workaround_bpflush_irq_sp_elx
83
84vector_entry workaround_bpflush_fiq_sp_elx
85 b fiq_sp_elx
86 check_vector_size workaround_bpflush_fiq_sp_elx
87
88vector_entry workaround_bpflush_serror_sp_elx
89 b serror_sp_elx
90 check_vector_size workaround_bpflush_serror_sp_elx
91
92 /* ---------------------------------------------------------------------
93 * Lower EL using AArch64 : 0x400 - 0x600
94 * ---------------------------------------------------------------------
95 */
96vector_entry workaround_bpflush_sync_exception_aarch64
97 apply_workaround
98 b sync_exception_aarch64
99 check_vector_size workaround_bpflush_sync_exception_aarch64
100
101vector_entry workaround_bpflush_irq_aarch64
102 apply_workaround
103 b irq_aarch64
104 check_vector_size workaround_bpflush_irq_aarch64
105
106vector_entry workaround_bpflush_fiq_aarch64
107 apply_workaround
108 b fiq_aarch64
109 check_vector_size workaround_bpflush_fiq_aarch64
110
111vector_entry workaround_bpflush_serror_aarch64
112 apply_workaround
113 b serror_aarch64
114 check_vector_size workaround_bpflush_serror_aarch64
115
116 /* ---------------------------------------------------------------------
117 * Lower EL using AArch32 : 0x600 - 0x800
118 * ---------------------------------------------------------------------
119 */
120vector_entry workaround_bpflush_sync_exception_aarch32
121 apply_workaround
122 b sync_exception_aarch32
123 check_vector_size workaround_bpflush_sync_exception_aarch32
124
125vector_entry workaround_bpflush_irq_aarch32
126 apply_workaround
127 b irq_aarch32
128 check_vector_size workaround_bpflush_irq_aarch32
129
130vector_entry workaround_bpflush_fiq_aarch32
131 apply_workaround
132 b fiq_aarch32
133 check_vector_size workaround_bpflush_fiq_aarch32
134
135vector_entry workaround_bpflush_serror_aarch32
136 apply_workaround
137 b serror_aarch32
138 check_vector_size workaround_bpflush_serror_aarch32
139
Varun Wadekard43583c2016-02-22 11:09:41 -0800140 .global denver_disable_dco
141
Varun Wadekar28463b92015-07-14 17:11:20 +0530142 /* ---------------------------------------------
143 * Disable debug interfaces
144 * ---------------------------------------------
145 */
146func denver_disable_ext_debug
147 mov x0, #1
148 msr osdlr_el1, x0
149 isb
150 dsb sy
151 ret
152endfunc denver_disable_ext_debug
153
154 /* ----------------------------------------------------
155 * Enable dynamic code optimizer (DCO)
156 * ----------------------------------------------------
157 */
158func denver_enable_dco
159 mrs x0, mpidr_el1
160 and x0, x0, #0xF
161 mov x1, #1
162 lsl x1, x1, x0
163 msr s3_0_c15_c0_2, x1
Varun Wadekar28463b92015-07-14 17:11:20 +0530164 ret
165endfunc denver_enable_dco
166
167 /* ----------------------------------------------------
168 * Disable dynamic code optimizer (DCO)
169 * ----------------------------------------------------
170 */
171func denver_disable_dco
172
173 /* turn off background work */
174 mrs x0, mpidr_el1
175 and x0, x0, #0xF
176 mov x1, #1
177 lsl x1, x1, x0
178 lsl x2, x1, #16
179 msr s3_0_c15_c0_2, x2
180 isb
181
182 /* wait till the background work turns off */
1831: mrs x2, s3_0_c15_c0_2
184 lsr x2, x2, #32
185 and w2, w2, 0xFFFF
186 and x2, x2, x1
187 cbnz x2, 1b
188
189 ret
190endfunc denver_disable_dco
191
192 /* -------------------------------------------------
193 * The CPU Ops reset function for Denver.
194 * -------------------------------------------------
195 */
196func denver_reset_func
197
198 mov x19, x30
199
Varun Wadekare34bd092018-01-10 17:03:22 -0800200#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
201 /*
202 * Check if the CPU supports the special instruction
203 * required to flush the indirect branch predictor and
204 * RSB. Support for this operation can be determined by
205 * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
206 */
207 mrs x0, id_afr0_el1
208 mov x1, #0x10000
209 and x0, x0, x1
210 cmp x0, #0
211 adr x1, workaround_bpflush_runtime_exceptions
212 mrs x2, vbar_el3
213 csel x0, x1, x2, ne
214 msr vbar_el3, x0
215#endif
216
Varun Wadekar28463b92015-07-14 17:11:20 +0530217 /* ----------------------------------------------------
218 * Enable dynamic code optimizer (DCO)
219 * ----------------------------------------------------
220 */
221 bl denver_enable_dco
222
223 ret x19
224endfunc denver_reset_func
225
226 /* ----------------------------------------------------
227 * The CPU Ops core power down function for Denver.
228 * ----------------------------------------------------
229 */
230func denver_core_pwr_dwn
231
232 mov x19, x30
233
Varun Wadekar28463b92015-07-14 17:11:20 +0530234 /* ---------------------------------------------
235 * Force the debug interfaces to be quiescent
236 * ---------------------------------------------
237 */
238 bl denver_disable_ext_debug
239
240 ret x19
241endfunc denver_core_pwr_dwn
242
243 /* -------------------------------------------------------
244 * The CPU Ops cluster power down function for Denver.
245 * -------------------------------------------------------
246 */
247func denver_cluster_pwr_dwn
248 ret
249endfunc denver_cluster_pwr_dwn
250
251 /* ---------------------------------------------
252 * This function provides Denver specific
253 * register information for crash reporting.
254 * It needs to return with x6 pointing to
255 * a list of register names in ascii and
256 * x8 - x15 having values of registers to be
257 * reported.
258 * ---------------------------------------------
259 */
260.section .rodata.denver_regs, "aS"
261denver_regs: /* The ascii list of register names to be reported */
262 .asciz "actlr_el1", ""
263
264func denver_cpu_reg_dump
265 adr x6, denver_regs
266 mrs x8, ACTLR_EL1
267 ret
268endfunc denver_cpu_reg_dump
269
/* Register the same reset/power-down handlers for every Denver variant. */
declare_cpu_ops denver, DENVER_MIDR_PN0, \
	denver_reset_func, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops denver, DENVER_MIDR_PN1, \
	denver_reset_func, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops denver, DENVER_MIDR_PN2, \
	denver_reset_func, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops denver, DENVER_MIDR_PN3, \
	denver_reset_func, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops denver, DENVER_MIDR_PN4, \
	denver_reset_func, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn