blob: 17592d34908aaf9879b68ce2facdf438dc1a24c2 [file] [log] [blame]
/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef CPU_MACROS_S
#define CPU_MACROS_S

#include <assert_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
Soby Mathewc704cbc2014-08-14 11:33:56 +010013 /*
Jeenu Viswambharanee5eb802016-11-18 12:58:28 +000014 * Write given expressions as quad words
15 *
16 * _count:
17 * Write at least _count quad words. If the given number of
18 * expressions is less than _count, repeat the last expression to
19 * fill _count quad words in total
20 * _rest:
21 * Optional list of expressions. _this is for parameter extraction
22 * only, and has no significance to the caller
23 *
24 * Invoked as:
25 * fill_constants 2, foo, bar, blah, ...
Achin Gupta4f6ad662013-10-25 09:08:21 +010026 */
Jeenu Viswambharanee5eb802016-11-18 12:58:28 +000027 .macro fill_constants _count:req, _this, _rest:vararg
28 .ifgt \_count
29 /* Write the current expression */
30 .ifb \_this
31 .error "Nothing to fill"
32 .endif
33 .quad \_this
34
35 /* Invoke recursively for remaining expressions */
36 .ifnb \_rest
37 fill_constants \_count-1, \_rest
38 .else
39 fill_constants \_count-1, \_this
40 .endif
41 .endif
42 .endm
43
44 /*
45 * Declare CPU operations
46 *
47 * _name:
48 * Name of the CPU for which operations are being specified
49 * _midr:
50 * Numeric value expected to read from CPU's MIDR
51 * _resetfunc:
52 * Reset function for the CPU. If there's no CPU reset function,
53 * specify CPU_NO_RESET_FUNC
Dimitris Papastamos914757c2018-03-12 14:47:09 +000054 * _extra1:
55 * This is a placeholder for future per CPU operations. Currently,
56 * some CPUs use this entry to set a test function to determine if
57 * the workaround for CVE-2017-5715 needs to be applied or not.
Dimitris Papastamosba51d9e2018-05-16 11:36:14 +010058 * _extra2:
Bipin Ravicaa2e052022-02-23 23:45:50 -060059 * This is a placeholder for future per CPU operations. Currently
Dimitris Papastamosba51d9e2018-05-16 11:36:14 +010060 * some CPUs use this entry to set a function to disable the
61 * workaround for CVE-2018-3639.
Bipin Ravicaa2e052022-02-23 23:45:50 -060062 * _extra3:
63 * This is a placeholder for future per CPU operations. Currently,
64 * some CPUs use this entry to set a test function to determine if
65 * the workaround for CVE-2022-23960 needs to be applied or not.
Arvind Ram Prakashe82f7592024-09-16 16:57:33 -050066 * _extra4:
67 * This is a placeholder for future per CPU operations. Currently,
68 * some CPUs use this entry to set a test function to determine if
69 * the workaround for CVE-2024-7881 needs to be applied or not.
laurenw-arm94accd32019-08-20 15:51:24 -050070 * _e_handler:
71 * This is a placeholder for future per CPU exception handlers.
Jeenu Viswambharanee5eb802016-11-18 12:58:28 +000072 * _power_down_ops:
73 * Comma-separated list of functions to perform power-down
74 * operatios on the CPU. At least one, and up to
75 * CPU_MAX_PWR_DWN_OPS number of functions may be specified.
76 * Starting at power level 0, these functions shall handle power
77 * down at subsequent power levels. If there aren't exactly
78 * CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
79 * used to handle power down at subsequent levels
80 */
Dimitris Papastamos914757c2018-03-12 14:47:09 +000081 .macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
Arvind Ram Prakashe82f7592024-09-16 16:57:33 -050082 _extra1:req, _extra2:req, _extra3:req, _extra4:req, \
83 _e_handler:req, _power_down_ops:vararg
Chris Kay33bfc5e2023-02-14 11:30:04 +000084 .section .cpu_ops, "a"
Jeenu Viswambharanee5eb802016-11-18 12:58:28 +000085 .align 3
Soby Mathewc704cbc2014-08-14 11:33:56 +010086 .type cpu_ops_\_name, %object
87 .quad \_midr
Roberto Vargase0e99462017-10-30 14:43:43 +000088#if defined(IMAGE_AT_EL3)
Jeenu Viswambharanee5eb802016-11-18 12:58:28 +000089 .quad \_resetfunc
Soby Mathewc704cbc2014-08-14 11:33:56 +010090#endif
Dimitris Papastamos914757c2018-03-12 14:47:09 +000091 .quad \_extra1
Dimitris Papastamosba51d9e2018-05-16 11:36:14 +010092 .quad \_extra2
Bipin Ravicaa2e052022-02-23 23:45:50 -060093 .quad \_extra3
Arvind Ram Prakashe82f7592024-09-16 16:57:33 -050094 .quad \_extra4
laurenw-arm94accd32019-08-20 15:51:24 -050095 .quad \_e_handler
Masahiro Yamada441bfdd2016-12-25 23:36:24 +090096#ifdef IMAGE_BL31
Jeenu Viswambharanee5eb802016-11-18 12:58:28 +000097 /* Insert list of functions */
98 fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
Soby Mathew8e2f2872014-08-14 12:49:05 +010099#endif
Boyan Karatotev821364e2023-01-27 09:35:10 +0000100 /*
101 * It is possible (although unlikely) that a cpu may have no errata in
102 * code. In that case the start label will not be defined. The list is
103 * intended to be used in a loop, so define it as zero-length for
104 * predictable behaviour. Since this macro is always called at the end
105 * of the cpu file (after all errata have been parsed) we can be sure
106 * that we are at the end of the list. Some cpus call declare_cpu_ops
107 * twice, so only do this once.
108 */
109 .pushsection .rodata.errata_entries
110 .ifndef \_name\()_errata_list_start
111 \_name\()_errata_list_start:
112 .endif
113 .ifndef \_name\()_errata_list_end
114 \_name\()_errata_list_end:
115 .endif
116 .popsection
117
118 /* and now put them in cpu_ops */
119 .quad \_name\()_errata_list_start
120 .quad \_name\()_errata_list_end
Jeenu Viswambharand5ec3672017-01-03 11:01:51 +0000121
122#if REPORT_ERRATA
123 .ifndef \_name\()_cpu_str
124 /*
125 * Place errata reported flag, and the spinlock to arbitrate access to
126 * it in the data section.
127 */
128 .pushsection .data
129 define_asm_spinlock \_name\()_errata_lock
130 \_name\()_errata_reported:
131 .word 0
132 .popsection
133
134 /* Place CPU string in rodata */
135 .pushsection .rodata
136 \_name\()_cpu_str:
137 .asciz "\_name"
138 .popsection
139 .endif
140
Boyan Karatotev821364e2023-01-27 09:35:10 +0000141 .quad \_name\()_cpu_str
Jeenu Viswambharand5ec3672017-01-03 11:01:51 +0000142
143#ifdef IMAGE_BL31
144 /* Pointers to errata lock and reported flag */
145 .quad \_name\()_errata_lock
146 .quad \_name\()_errata_reported
Boyan Karatotev821364e2023-01-27 09:35:10 +0000147#endif /* IMAGE_BL31 */
148#endif /* REPORT_ERRATA */
Jeenu Viswambharand5ec3672017-01-03 11:01:51 +0000149
Masahiro Yamada441bfdd2016-12-25 23:36:24 +0900150#if defined(IMAGE_BL31) && CRASH_REPORTING
Soby Mathew38b4bc92014-08-14 13:36:41 +0100151 .quad \_name\()_cpu_reg_dump
152#endif
Soby Mathewc704cbc2014-08-14 11:33:56 +0100153 .endm
Dan Handleyea596682015-04-01 17:34:24 +0100154
Dimitris Papastamos914757c2018-03-12 14:47:09 +0000155 .macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
156 _power_down_ops:vararg
Arvind Ram Prakashe82f7592024-09-16 16:57:33 -0500157 declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, 0, \
Dimitris Papastamos914757c2018-03-12 14:47:09 +0000158 \_power_down_ops
159 .endm
160
laurenw-arm94accd32019-08-20 15:51:24 -0500161 .macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
162 _e_handler:req, _power_down_ops:vararg
163 declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
Arvind Ram Prakashe82f7592024-09-16 16:57:33 -0500164 0, 0, 0, 0, \_e_handler, \_power_down_ops
laurenw-arm94accd32019-08-20 15:51:24 -0500165 .endm
166
Dimitris Papastamosba51d9e2018-05-16 11:36:14 +0100167 .macro declare_cpu_ops_wa _name:req, _midr:req, \
168 _resetfunc:req, _extra1:req, _extra2:req, \
Bipin Ravicaa2e052022-02-23 23:45:50 -0600169 _extra3:req, _power_down_ops:vararg
Dimitris Papastamos914757c2018-03-12 14:47:09 +0000170 declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
Arvind Ram Prakashe82f7592024-09-16 16:57:33 -0500171 \_extra1, \_extra2, \_extra3, 0, 0, \_power_down_ops
172 .endm
173
174 .macro declare_cpu_ops_wa_4 _name:req, _midr:req, \
175 _resetfunc:req, _extra1:req, _extra2:req, \
176 _extra3:req, _extra4:req, _power_down_ops:vararg
177 declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
178 \_extra1, \_extra2, \_extra3, \_extra4, 0, \_power_down_ops
Dimitris Papastamos914757c2018-03-12 14:47:09 +0000179 .endm
180
Dimitris Papastamos780cc952018-03-12 13:27:02 +0000181 /*
182 * This macro is used on some CPUs to detect if they are vulnerable
183 * to CVE-2017-5715.
184 */
185 .macro cpu_check_csv2 _reg _label
186 mrs \_reg, id_aa64pfr0_el1
187 ubfx \_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
188 /*
Antonio Nino Diaza9044872019-02-12 11:25:02 +0000189 * If the field equals 1, branch targets trained in one context cannot
190 * affect speculative execution in a different context.
191 *
192 * If the field equals 2, it means that the system is also aware of
193 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
194 * expect users of the registers to do the right thing.
195 *
196 * Only apply mitigations if the value of this field is 0.
Dimitris Papastamos780cc952018-03-12 13:27:02 +0000197 */
Antonio Nino Diaza9044872019-02-12 11:25:02 +0000198#if ENABLE_ASSERTIONS
199 cmp \_reg, #3 /* Only values 0 to 2 are expected */
200 ASM_ASSERT(lo)
201#endif
202
203 cmp \_reg, #0
204 bne \_label
Dimitris Papastamos780cc952018-03-12 13:27:02 +0000205 .endm
Deepak Pandeyb5615362018-10-11 13:44:43 +0530206
207 /*
208 * Helper macro that reads the part number of the current
209 * CPU and jumps to the given label if it matches the CPU
210 * MIDR provided.
211 *
212 * Clobbers x0.
213 */
214 .macro jump_if_cpu_midr _cpu_midr, _label
215 mrs x0, midr_el1
216 ubfx x0, x0, MIDR_PN_SHIFT, #12
217 cmp w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
218 b.eq \_label
219 .endm
Antonio Nino Diaz5eb88372018-11-08 10:20:19 +0000220
Boyan Karatotev821364e2023-01-27 09:35:10 +0000221
222/*
223 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
224 * will be applied automatically
225 *
226 * _cpu:
227 * Name of cpu as given to declare_cpu_ops
228 *
229 * _cve:
230 * Whether erratum is a CVE. CVE year if yes, 0 otherwise
231 *
232 * _id:
233 * Erratum or CVE number. Please combine with previous field with ERRATUM
234 * or CVE macros
235 *
236 * _chosen:
237 * Compile time flag on whether the erratum is included
238 *
239 * _apply_at_reset:
240 * Whether the erratum should be automatically applied at reset
241 */
242.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
243 .pushsection .rodata.errata_entries
244 .align 3
245 .ifndef \_cpu\()_errata_list_start
246 \_cpu\()_errata_list_start:
247 .endif
248
249 /* check if unused and compile out if no references */
250 .if \_apply_at_reset && \_chosen
251 .quad erratum_\_cpu\()_\_id\()_wa
252 .else
253 .quad 0
254 .endif
255 /* TODO(errata ABI): this prevents all checker functions from
256 * being optimised away. Can be done away with unless the ABI
257 * needs them */
258 .quad check_erratum_\_cpu\()_\_id
259 /* Will fit CVEs with up to 10 character in the ID field */
260 .word \_id
261 .hword \_cve
262 .byte \_chosen
263 /* TODO(errata ABI): mitigated field for known but unmitigated
264 * errata */
265 .byte 0x1
266 .popsection
267.endm
268
269.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
270 add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset
271
272 func erratum_\_cpu\()_\_id\()_wa
273 mov x8, x30
274
275 /* save rev_var for workarounds that might need it but don't
276 * restore to x0 because few will care */
277 mov x7, x0
278 bl check_erratum_\_cpu\()_\_id
279 cbz x0, erratum_\_cpu\()_\_id\()_skip
280.endm
281
282.macro _workaround_end _cpu:req, _id:req
283 erratum_\_cpu\()_\_id\()_skip:
284 ret x8
285 endfunc erratum_\_cpu\()_\_id\()_wa
286.endm
287
288/*******************************************************************************
289 * Errata workaround wrappers
290 ******************************************************************************/
291/*
292 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
293 * will be applied automatically
294 *
295 * _cpu:
296 * Name of cpu as given to declare_cpu_ops
297 *
298 * _cve:
299 * Whether erratum is a CVE. CVE year if yes, 0 otherwise
300 *
301 * _id:
302 * Erratum or CVE number. Please combine with previous field with ERRATUM
303 * or CVE macros
304 *
305 * _chosen:
306 * Compile time flag on whether the erratum is included
307 *
308 * in body:
309 * clobber x0 to x7 (please only use those)
310 * argument x7 - cpu_rev_var
311 *
312 * _wa clobbers: x0-x8 (PCS compliant)
313 */
314.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
315 _workaround_start \_cpu, \_cve, \_id, \_chosen, 1
316.endm
317
318/*
319 * See `workaround_reset_start` for usage info. Additional arguments:
320 *
321 * _midr:
322 * Check if CPU's MIDR matches the CPU it's meant for. Must be specified
323 * for errata applied in generic code
324 */
325.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
326 /*
327 * Let errata specify if they need MIDR checking. Sadly, storing the
328 * MIDR in an .equ to retrieve automatically blows up as it stores some
329 * brackets in the symbol
330 */
331 .ifnb \_midr
332 jump_if_cpu_midr \_midr, 1f
333 b erratum_\_cpu\()_\_id\()_skip
334
335 1:
336 .endif
337 _workaround_start \_cpu, \_cve, \_id, \_chosen, 0
338.endm
339
340/*
341 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
342 * is kept here so the same #define can be used as that macro
343 */
344.macro workaround_reset_end _cpu:req, _cve:req, _id:req
345 _workaround_end \_cpu, \_id
346.endm
347
348/*
349 * See `workaround_reset_start` for usage info. The _cve argument is kept here
350 * so the same #define can be used as that macro. Additional arguments:
351 *
352 * _no_isb:
353 * Optionally do not include the trailing isb. Please disable with the
354 * NO_ISB macro
355 */
356.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
357 /*
358 * Runtime errata do not have a reset function to call the isb for them
359 * and missing the isb could be very problematic. It is also likely as
360 * they tend to be scattered in generic code.
361 */
362 .ifb \_no_isb
363 isb
364 .endif
365 _workaround_end \_cpu, \_id
366.endm
367
368/*******************************************************************************
369 * Errata workaround helpers
370 ******************************************************************************/
371/*
372 * Set a bit in a system register. Can set multiple bits but is limited by the
373 * way the ORR instruction encodes them.
374 *
375 * _reg:
376 * Register to write to
377 *
378 * _bit:
379 * Bit to set. Please use a descriptive #define
380 *
381 * _assert:
382 * Optionally whether to read back and assert that the bit has been
383 * written. Please disable with NO_ASSERT macro
384 *
385 * clobbers: x1
386 */
387.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
388 mrs x1, \_reg
389 orr x1, x1, #\_bit
390 msr \_reg, x1
391.endm
392
393/*
Boyan Karatotevcea0c262023-04-04 11:29:00 +0100394 * Clear a bit in a system register. Can clear multiple bits but is limited by
395 * the way the BIC instrucion encodes them.
396 *
397 * see sysreg_bit_set for usage
398 */
399.macro sysreg_bit_clear _reg:req, _bit:req
400 mrs x1, \_reg
401 bic x1, x1, #\_bit
402 msr \_reg, x1
403.endm
404
Boyan Karatotev6c473862025-01-21 11:41:46 +0000405/*
406 * Toggle a bit in a system register. Can toggle multiple bits but is limited by
407 * the way the EOR instrucion encodes them.
408 *
409 * see sysreg_bit_set for usage
410 */
411.macro sysreg_bit_toggle _reg:req, _bit:req, _assert=1
412 mrs x1, \_reg
413 eor x1, x1, #\_bit
414 msr \_reg, x1
415.endm
416
Boyan Karatotevcea0c262023-04-04 11:29:00 +0100417.macro override_vector_table _table:req
418 adr x1, \_table
419 msr vbar_el3, x1
420.endm
421
422/*
Jayanth Dodderi Chidanandf566e102023-06-19 16:20:02 +0100423 * BFI : Inserts bitfield into a system register.
424 *
425 * BFI{cond} Rd, Rn, #lsb, #width
426 */
427.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
428 /* Source value for BFI */
429 mov x1, #\_src
430 mrs x0, \_reg
431 bfi x0, x1, #\_lsb, #\_width
432 msr \_reg, x0
433.endm
434
Jagdish Gediya1670a2b2024-07-23 12:54:28 +0100435.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
436 /* Source value in register for BFI */
437 mov x1, \_gpr
438 mrs x0, \_reg
439 bfi x0, x1, #\_lsb, #\_width
440 msr \_reg, x0
441.endm
442
Jayanth Dodderi Chidanandf566e102023-06-19 16:20:02 +0100443/*
Boyan Karatotev5c074d32024-12-04 15:25:27 +0000444 * Extract CPU revision and variant, and combine them into a single numeric for
445 * easier comparison.
446 *
447 * _res:
448 * register where the result will be placed
449 * _tmp:
450 * register to clobber for temporaries
451 */
452.macro get_rev_var _res:req, _tmp:req
453 mrs \_tmp, midr_el1
454
455 /*
456 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
457 * as variant[7:4] and revision[3:0] of x0.
458 *
459 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
460 * extract x1[3:0] into x0[3:0] retaining other bits.
461 */
462 ubfx \_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
463 bfxil \_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
464.endm
465
466/*
Boyan Karatotev821364e2023-01-27 09:35:10 +0000467 * Apply erratum
468 *
469 * _cpu:
470 * Name of cpu as given to declare_cpu_ops
471 *
472 * _cve:
473 * Whether erratum is a CVE. CVE year if yes, 0 otherwise
474 *
475 * _id:
476 * Erratum or CVE number. Please combine with previous field with ERRATUM
477 * or CVE macros
478 *
479 * _chosen:
480 * Compile time flag on whether the erratum is included
481 *
Harrison Mutaide3fa1e2023-06-26 16:25:21 +0100482 * _get_rev:
483 * Optional parameter that determines whether to insert a call to the CPU revision fetching
Boyan Karatotev24395f42024-09-26 17:09:53 +0100484 * procedure. Stores the result of this in the temporary register x10 to allow for chaining
Harrison Mutaide3fa1e2023-06-26 16:25:21 +0100485 *
486 * clobbers: x0-x10 (PCS compliant)
Boyan Karatotev821364e2023-01-27 09:35:10 +0000487 */
Harrison Mutaide3fa1e2023-06-26 16:25:21 +0100488.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
Boyan Karatotevcd7482a2024-09-26 17:00:09 +0100489 .if (\_chosen && \_get_rev)
Boyan Karatotev821364e2023-01-27 09:35:10 +0000490 mov x9, x30
491 bl cpu_get_rev_var
Harrison Mutaide3fa1e2023-06-26 16:25:21 +0100492 mov x10, x0
493 .elseif (\_chosen)
494 mov x9, x30
495 mov x0, x10
496 .endif
497
498 .if \_chosen
Boyan Karatotev821364e2023-01-27 09:35:10 +0000499 bl erratum_\_cpu\()_\_id\()_wa
500 mov x30, x9
Boyan Karatotev821364e2023-01-27 09:35:10 +0000501 .endif
502.endm
503
504/*
Boyan Karatotev23f4ff92025-01-21 08:44:52 +0000505 * Helpers to report if an erratum applies. Compares the given revision variant
506 * to the given value. Return ERRATA_APPLIES or ERRATA_NOT_APPLIES accordingly.
507 *
508 * _rev_num: the given revision variant. Or
509 * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
510 *
511 * in body:
512 * clobber: x0
513 * argument: x0 - cpu_rev_var
514 */
515.macro cpu_rev_var_ls _rev_num:req
516 cmp x0, #\_rev_num
517 cset x0, ls
518.endm
519
520.macro cpu_rev_var_hs _rev_num:req
521 cmp x0, #\_rev_num
522 cset x0, hs
523.endm
524
525.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
526 cmp x0, #\_rev_num_lo
527 mov x1, #\_rev_num_hi
528 ccmp x0, x1, #2, hs
529 cset x0, ls
530.endm
531
532/*
533 * Helpers to select which revisions errata apply to.
Boyan Karatotev821364e2023-01-27 09:35:10 +0000534 *
535 * _cpu:
536 * Name of cpu as given to declare_cpu_ops
537 *
538 * _cve:
539 * Whether erratum is a CVE. CVE year if yes, 0 otherwise
540 *
541 * _id:
542 * Erratum or CVE number. Please combine with previous field with ERRATUM
543 * or CVE macros
544 *
545 * _rev_num:
546 * Revision to apply to
547 *
548 * in body:
Boyan Karatotev23f4ff92025-01-21 08:44:52 +0000549 * clobber: x0 to x1
Boyan Karatotev821364e2023-01-27 09:35:10 +0000550 * argument: x0 - cpu_rev_var
551 */
552.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
553 func check_erratum_\_cpu\()_\_id
Boyan Karatotev23f4ff92025-01-21 08:44:52 +0000554 cpu_rev_var_ls \_rev_num
555 ret
Boyan Karatotev821364e2023-01-27 09:35:10 +0000556 endfunc check_erratum_\_cpu\()_\_id
557.endm
558
559.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
560 func check_erratum_\_cpu\()_\_id
Boyan Karatotev23f4ff92025-01-21 08:44:52 +0000561 cpu_rev_var_hs \_rev_num
562 ret
Boyan Karatotev821364e2023-01-27 09:35:10 +0000563 endfunc check_erratum_\_cpu\()_\_id
564.endm
565
566.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
567 func check_erratum_\_cpu\()_\_id
Boyan Karatotev23f4ff92025-01-21 08:44:52 +0000568 cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
569 ret
Boyan Karatotev821364e2023-01-27 09:35:10 +0000570 endfunc check_erratum_\_cpu\()_\_id
571.endm
572
Boyan Karatotevcea0c262023-04-04 11:29:00 +0100573.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
574 func check_erratum_\_cpu\()_\_id
575 .if \_chosen
576 mov x0, #ERRATA_APPLIES
577 .else
578 mov x0, #ERRATA_MISSING
579 .endif
580 ret
581 endfunc check_erratum_\_cpu\()_\_id
582.endm
583
Boyan Karatotev23f4ff92025-01-21 08:44:52 +0000584/*
585 * provide a shorthand for the name format for annoying errata
Boyan Karatotev5c074d32024-12-04 15:25:27 +0000586 * body: clobber x0 to x4
Boyan Karatotev23f4ff92025-01-21 08:44:52 +0000587 */
Boyan Karatotevcea0c262023-04-04 11:29:00 +0100588.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
589 func check_erratum_\_cpu\()_\_id
590.endm
591
592.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
593 endfunc check_erratum_\_cpu\()_\_id
594.endm
595
596
Boyan Karatotev821364e2023-01-27 09:35:10 +0000597/*******************************************************************************
598 * CPU reset function wrapper
599 ******************************************************************************/
600
601/*
602 * Wrapper to automatically apply all reset-time errata. Will end with an isb.
603 *
604 * _cpu:
605 * Name of cpu as given to declare_cpu_ops
606 *
607 * in body:
608 * clobber x8 to x14
609 * argument x14 - cpu_rev_var
610 */
611.macro cpu_reset_func_start _cpu:req
612 func \_cpu\()_reset_func
613 mov x15, x30
Boyan Karatotev5c074d32024-12-04 15:25:27 +0000614 get_rev_var x14, x0
Boyan Karatotev821364e2023-01-27 09:35:10 +0000615
616 /* short circuit the location to avoid searching the list */
617 adrp x12, \_cpu\()_errata_list_start
618 add x12, x12, :lo12:\_cpu\()_errata_list_start
619 adrp x13, \_cpu\()_errata_list_end
620 add x13, x13, :lo12:\_cpu\()_errata_list_end
621
622 errata_begin:
623 /* if head catches up with end of list, exit */
624 cmp x12, x13
625 b.eq errata_end
626
627 ldr x10, [x12, #ERRATUM_WA_FUNC]
628 /* TODO(errata ABI): check mitigated and checker function fields
629 * for 0 */
630 ldrb w11, [x12, #ERRATUM_CHOSEN]
631
632 /* skip if not chosen */
633 cbz x11, 1f
634 /* skip if runtime erratum */
635 cbz x10, 1f
636
637 /* put cpu revision in x0 and call workaround */
638 mov x0, x14
639 blr x10
640 1:
641 add x12, x12, #ERRATUM_ENTRY_SIZE
642 b errata_begin
643 errata_end:
644.endm
645
646.macro cpu_reset_func_end _cpu:req
647 isb
648 ret x15
649 endfunc \_cpu\()_reset_func
650.endm
Boyan Karatotev29fa56d2023-01-27 09:38:15 +0000651
Antonio Nino Diaz5eb88372018-11-08 10:20:19 +0000652#endif /* CPU_MACROS_S */