/*
 * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>
	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The handler is invoked by tail-call
	 * (br) with the original return address restored in x30, so it
	 * returns directly to this function's caller.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	/* Preserve the return address in a callee-chosen scratch register */
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (returned in x0; 0 on failure) */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	/* Restore the saved return address before the tail-call */
	mov	x30, x19
	/* A NULL reset handler means nothing to do for this CPU */
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif
Soby Mathewc704cbc2014-08-14 11:33:56 +010050
#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for requested power
	 * level is called. The handler is entered by tail-call (br), so it
	 * returns directly to this function's caller.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level:
	 * x2 = min(x0, CPU_MAX_PWR_DWN_OPS - 1)  (unsigned clamp via csel/hi)
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	/* Fetch this CPU's cpu_ops pointer from per-CPU data (tpidr_el3) */
	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/*
	 * Get the appropriate power down handler:
	 * offset = CPU_PWR_DWN_OPS + level * 8 (lsl #3 scales by pointer size)
	 */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
	br	x1
endfunc prepare_cpu_pwr_dwn
Soby Mathew8e2f2872014-08-14 12:49:05 +010083
84
	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled. The return address is
	 * kept in a register (x10) instead of on a stack.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	/* Already initialized: nothing to do */
	cbnz	x0, 1f
	/* Save LR in x10 since get_cpu_ops_ptr clobbers x30 via bl */
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	/*
	 * NOTE(review): the '!' (pre-index writeback) also advances x6 by
	 * CPU_DATA_CPU_OPS_PTR; the updated x6 is never read afterwards and
	 * x6 is in the declared clobber list, so this is harmless — but the
	 * writeback looks unnecessary. Confirm it is intentional.
	 */
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */
108
#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked. Silently does nothing if no cpu_ops
	 * entry matches or the entry has no dump function (crash path must
	 * not assert).
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	/* Preserve LR across the two calls below */
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif
133
	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in cpu_ops entries. Only the implementation and part number
	 * are used to match the entries.
	 * Return :
	 *	x0 - The matching cpu_ops pointer on Success
	 *	x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/*
	 * Get the cpu_ops start and end locations (linker-defined section
	 * bounds), pre-offset by CPU_MIDR so each iteration can load the
	 * entry's midr field directly.
	 */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter (0 = not found) */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/*
	 * Retain only the implementation and part number using mask.
	 * w-registers suffice: MIDR fields of interest are in bits [31:0].
	 */
	and	w2, w2, w3
1:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	error_exit

	/* load the midr from the cpu_ops; post-increment to the next entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches to midr of this core */
	cmp	w1, w2
	b.ne	1b

	/*
	 * Subtract the increment and offset to get the cpu-ops pointer
	 * (x4 has already advanced past the matching entry's midr field).
	 */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr
Soby Mathewc0884332014-09-22 12:11:36 +0100177
/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 *
 * Out:	x0 = (variant << 4) | revision
 * Clobbers: x1
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
197
/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or same as a given
 * value, indicates that errata applies; otherwise not.
 *
 * Out:	x0 = ERRATA_APPLIES if x0 <= x1 (unsigned), else ERRATA_NOT_APPLIES
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	/* 'ls' = unsigned lower-or-same */
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
213
/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or same as a
 * given value, indicates that errata applies; otherwise not.
 *
 * Out:	x0 = ERRATA_APPLIES if x0 >= x1 (unsigned), else ERRATA_NOT_APPLIES
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	/* 'hs' = unsigned higher-or-same */
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs
229
/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for errata
 * application purposes. If the revision-variant is between or includes the given
 * values, this indicates that errata applies; otherwise not.
 *
 * Out:	x0 = ERRATA_APPLIES if x1 <= x0 <= x2 (unsigned, inclusive),
 *	     else ERRATA_NOT_APPLIES
 * Shall clobber: x0-x4
 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	/* Lower bound: x1 = APPLIES iff x0 >= x1 */
	cmp	x0, x1
	csel	x1, x3, x4, hs
	/* Below the lower bound: skip the upper-bound check */
	cbz	x1, 1f
	/* Upper bound: x1 = APPLIES iff x0 <= x2 */
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range
250
#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *  - with MMU and data caches are enabled;
 *  - after cpu_ops have been initialized in per-CPU data.
 *
 * The per-CPU reporting function is entered by tail-call (br x1), so it
 * returns directly to this function's caller.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	/* The xzr slot pairs with LR to keep SP 16-byte aligned */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	/* x1 = errata reporting function for this CPU class */
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 * Keep the reporting function in callee-saved x19 across the call.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	/* x1 = reporting function; x0 = non-zero iff we should report */
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif
Dimitris Papastamos914757c2018-03-12 14:47:09 +0000308
/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 * in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	/* x0 = per-CPU check function stored in the extra1 cpu_ops slot */
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #0
	beq	1f
	/* Tail-call: the check function returns directly to our caller */
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
Dimitris Papastamosba51d9e2018-05-16 11:36:14 +0100341
/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation.  If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 * in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	/* x0 = disable-mitigation pointer from the extra2 cpu_ops slot */
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr