blob: c43beb6175737e37fe3cf20adf55d6cbe062f8e1 [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Boyan Karatotev6c473862025-01-21 11:41:46 +00002 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Achin Gupta4f6ad662013-10-25 09:08:21 +01005 */
Antonio Nino Diaz5eb88372018-11-08 10:20:19 +00006#ifndef CPU_MACROS_S
7#define CPU_MACROS_S
Achin Gupta4f6ad662013-10-25 09:08:21 +01008
Antonio Nino Diaza9044872019-02-12 11:25:02 +00009#include <assert_macros.S>
Boyan Karatoteve7d7c272023-01-25 16:55:18 +000010#include <lib/cpus/cpu_ops.h>
Boyan Karatotev5d38cb32023-01-27 09:37:07 +000011#include <lib/cpus/errata.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010012
	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	  .ifgt \_count
	    /* Write the current expression */
	    .ifb \_this
	      .error "Nothing to fill"
	    .endif
	    .quad \_this

	    /* Invoke recursively for remaining expressions */
	    .ifnb \_rest
	      fill_constants \_count-1, \_rest
	    .else
	      /* List exhausted: repeat the last expression until _count hits 0 */
	      fill_constants \_count-1, \_this
	    .endif
	  .endif
	.endm
43
	/*
	 * Declare CPU operations
	 *
	 * The .quad entries emitted below form one cpu_ops structure; their
	 * order and conditional inclusion must match the offsets declared in
	 * lib/cpus/cpu_ops.h. Do not reorder them.
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to read from CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU.
	 * _extra1:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2017-5715 needs to be applied or not.
	 * _extra2:
	 *	This is a placeholder for future per CPU operations. Currently
	 *	some CPUs use this entry to set a function to disable the
	 *	workaround for CVE-2018-3639.
	 * _extra3:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2022-23960 needs to be applied or not.
	 * _extra4:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2024-7881 needs to be applied or not.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_extra1:req, _extra2:req, _extra3:req, _extra4:req, \
		_e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	.quad \_resetfunc
#endif
	.quad \_extra1
	.quad \_extra2
	.quad \_extra3
	.quad \_extra4
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
	\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
	\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	.ifndef \_name\()_cpu_str
	/*
	 * Place errata reported flag, and the spinlock to arbitrate access to
	 * it in the data section.
	 */
	.pushsection .data
	define_asm_spinlock \_name\()_errata_lock
	\_name\()_errata_reported:
	.word 0
	.popsection

	/* Place CPU string in rodata */
	.pushsection .rodata
	\_name\()_cpu_str:
	.asciz "\_name"
	.popsection
	.endif

	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	.quad \_name\()_cpu_reg_dump
#endif
	.endm
Dan Handleyea596682015-04-01 17:34:24 +0100153
	/* Declare CPU ops with no CVE workaround hooks and no exception handler */
	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, 0, \
			\_power_down_ops
	.endm

	/* Declare CPU ops with a per-CPU exception handler (_e_handler) */
	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			0, 0, 0, 0, \_e_handler, \_power_down_ops
	.endm

	/* Declare CPU ops with CVE workaround hooks _extra1 to _extra3 */
	.macro declare_cpu_ops_wa _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, 0, 0, \_power_down_ops
	.endm

	/* Declare CPU ops with CVE workaround hooks _extra1 to _extra4 */
	.macro declare_cpu_ops_wa_4 _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _extra4:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, \_extra4, 0, \_power_down_ops
	.endm
179
	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 *
	 * _reg:
	 *	Scratch register; clobbered (along with the condition flags)
	 * _label:
	 *	Branch target taken when CSV2 != 0, i.e. when the mitigation is
	 *	NOT required. Fall-through means the mitigation must be applied.
	 */
	.macro cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #3 /* Only values 0 to 2 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm
Deepak Pandeyb5615362018-10-11 13:44:43 +0530205
206 /*
207 * Helper macro that reads the part number of the current
208 * CPU and jumps to the given label if it matches the CPU
209 * MIDR provided.
210 *
211 * Clobbers x0.
212 */
213 .macro jump_if_cpu_midr _cpu_midr, _label
214 mrs x0, midr_el1
215 ubfx x0, x0, MIDR_PN_SHIFT, #12
216 cmp w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
217 b.eq \_label
218 .endm
Antonio Nino Diaz5eb88372018-11-08 10:20:19 +0000219
Boyan Karatotev821364e2023-01-27 09:35:10 +0000220
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _split_wa:
 *	Flag that indicates whether an erratum has split workaround or not.
 *	Default value is 0.
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _split_wa=0
#if REPORT_ERRATA || ERRATA_ABI_SUPPORT
	.pushsection .rodata.errata_entries
	.align	3
	/* Lazily open the per-cpu errata list on first entry */
	.ifndef \_cpu\()_errata_list_start
	\_cpu\()_errata_list_start:
	.endif

	.quad	check_erratum_\_cpu\()_\_id
	/* Will fit CVEs with up to 10 character in the ID field */
	.word	\_id
	.hword	\_cve
	/*
	 * bit magic that appends chosen field based on _split_wa:
	 * result is 0 when not chosen, otherwise bit0 = chosen (1) and
	 * bit1 = _split_wa
	 */
	.byte	((\_chosen * 0b11) & ((\_split_wa << 1) | \_chosen))
	.byte	0x0 /* alignment */
	.popsection
#endif
.endm
260
/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _split_wa:
 *	Flag that indicates whether an erratum has split workaround or not.
 *	Default value is 0.
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, \
	_chosen:req, _split_wa=0

	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_split_wa

	.if \_chosen
	/* put errata directly into the reset function */
	.pushsection .text.asm.\_cpu\()_reset_func, "ax"
	.else
	/* or something else that will get garbage collected by the
	 * linker */
	.pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax"
	.endif
	/* revision is stored in x14, get it (set up by cpu_reset_prologue) */
	mov	x0, x14
	bl	check_erratum_\_cpu\()_\_id
	/* save rev_var for workarounds that might need it */
	mov	x7, x14
	/* check function returned 0: erratum does not apply, skip the body */
	cbz	x0, erratum_\_cpu\()_\_id\()_skip_reset
.endm
311
/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 *
 * On entry x0 holds cpu_rev_var. x8 is used to preserve the link register
 * across the emitted function body.
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen

	func erratum_\_cpu\()_\_id\()_wa
	mov	x8, x30
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ to retrieve automatically blows up as it stores some
	 * brackets in the symbol
	 */
	.ifnb \_midr
	jump_if_cpu_midr \_midr, 1f
	b	erratum_\_cpu\()_\_id\()_skip_runtime

	1:
	.endif
	/* save rev_var for workarounds that might need it but don't
	 * restore to x0 because few will care */
	mov	x7, x0
	bl	check_erratum_\_cpu\()_\_id
	/* check function returned 0: erratum does not apply, skip the body */
	cbz	x0, erratum_\_cpu\()_\_id\()_skip_runtime
.endm
341
/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	/* label targeted by workaround_reset_start when the erratum does not apply */
	erratum_\_cpu\()_\_id\()_skip_reset:
	.popsection
.endm
350
/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to call the isb for them
	 * and missing the isb could be very problematic. It is also likely as
	 * they tend to be scattered in generic code.
	 */
	.ifb \_no_isb
	isb
	.endif
	erratum_\_cpu\()_\_id\()_skip_runtime:
	/* return via x8, saved in workaround_runtime_start */
	ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm
372
373/*******************************************************************************
374 * Errata workaround helpers
375 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *	NOTE(review): the read-back assert is not implemented in this body;
 *	the parameter is accepted but unused — confirm before relying on it
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm
397
/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 * the way the BIC instrucion encodes them.
 *
 * see sysreg_bit_set for usage
 *
 * clobbers: x1
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm
409
/*
 * Toggle a bit in a system register. Can toggle multiple bits but is limited by
 * the way the EOR instrucion encodes them.
 *
 * see sysreg_bit_set for usage
 * NOTE(review): as in sysreg_bit_set, the _assert parameter is accepted but
 * unused in this body
 *
 * clobbers: x1
 */
.macro sysreg_bit_toggle _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	eor	x1, x1, #\_bit
	msr	\_reg, x1
.endm
421
/*
 * Point VBAR_EL3 at the given vector table. Uses adr, so the table must be
 * within +/-1MB of the call site.
 *
 * clobbers: x1
 */
.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm
426
/*
 * BFI : Inserts bitfield into a system register.
 *
 * BFI{cond} Rd, Rn, #lsb, #width
 *
 * Writes the immediate _src into bits [_lsb + _width - 1 : _lsb] of _reg,
 * leaving the remaining bits unchanged.
 *
 * clobbers: x0, x1
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
439
/*
 * As sysreg_bitfield_insert, but the source value comes from a general-purpose
 * register instead of an immediate.
 *
 * NOTE(review): _gpr must not be x0 (and x1 only works because it is copied
 * first) since both are used as scratch here — confirm at call sites.
 *
 * clobbers: x0, x1
 */
.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
	/* Source value in register for BFI */
	mov	x1, \_gpr
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
447
/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 *
 * _res:
 *	register where the result will be placed
 * _tmp:
 *	register to clobber for temporaries
 */
.macro get_rev_var _res:req, _tmp:req
	mrs	\_tmp, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	\_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	\_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
.endm
470
/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU revision fetching
 *	procedure. Stores the result of this in the temporary register x10 to allow for chaining
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen && \_get_rev)
	/* fetch the revision and cache it in x10 for chained invocations */
	mov	x9, x30
	bl	cpu_get_rev_var
	mov	x10, x0
	.elseif (\_chosen)
	/* revision already cached in x10 by a previous apply_erratum */
	mov	x9, x30
	mov	x0, x10
	.endif

	.if \_chosen
	bl	erratum_\_cpu\()_\_id\()_wa
	mov	x30, x9
	.endif
.endm
508
/*
 * Helpers to report if an erratum applies. Compares the given revision variant
 * to the given value. Return ERRATA_APPLIES or ERRATA_NOT_APPLIES accordingly.
 *
 * _rev_num: the given revision variant. Or
 * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
 *
 * in body:
 *	clobber: x0 (cpu_rev_var_range also clobbers x1)
 *	argument: x0 - cpu_rev_var
 */
.macro cpu_rev_var_ls _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, ls	/* x0 = (rev_var <= _rev_num) */
.endm

.macro cpu_rev_var_hs _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, hs	/* x0 = (rev_var >= _rev_num) */
.endm

.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
	cmp	x0, #\_rev_num_lo
	mov	x1, #\_rev_num_hi
	/*
	 * If rev_var >= _rev_num_lo, compare against the upper bound.
	 * Otherwise ccmp forces NZCV to #2 (C set, Z clear), which makes the
	 * final 'ls' test fail so x0 is set to 0.
	 */
	ccmp	x0, x1, #2, hs
	cset	x0, ls
.endm
536
Govindraj Raja84f62a72025-04-03 12:57:32 -0500537
#if __clang_major__ < 17
/*
 * A problem with clang version < 17 can cause resolving nested
 * 'cfi_startproc' to fail compilation.
 * So add a compatibility variant for start and endfunc expansions
 * to ignore `cfi_startproc` and `cfi_endproc`, this to be used only with
 * check_errata/reset macros if we build TF-A with clang version < 17
 *
 * NOTE(review): __clang_major__ is undefined for non-clang toolchains, so
 * the preprocessor evaluates it as 0 and the compat variants (without CFI
 * directives) are used there too — confirm this is intended.
 */

.macro func_compat _name, _align=2
	.section .text.asm.\_name, "ax"
	.type \_name, %function
	.align \_align
	\_name:
#if ENABLE_BTI
	/* landing pad for both indirect calls and jumps */
	bti	jc
#endif
.endm

/*
 * This macro is used to mark the end of a function.
 */
.macro endfunc_compat _name
	.size \_name, . - \_name
.endm

#else

#define func_compat func
#define endfunc_compat endfunc

#endif /* __clang_version__ < 17 */
570
/*
 * Helpers to select which revisions errata apply to.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x1
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func_compat check_erratum_\_cpu\()_\_id
	cpu_rev_var_ls \_rev_num
	ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func_compat check_erratum_\_cpu\()_\_id
	cpu_rev_var_hs \_rev_num
	ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func_compat check_erratum_\_cpu\()_\_id
	cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
	ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
611
/*
 * Emit a check function whose result is decided entirely at build time:
 * returns ERRATA_APPLIES when the erratum was compiled in (_chosen != 0),
 * ERRATA_MISSING otherwise. Clobbers x0.
 */
.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func_compat check_erratum_\_cpu\()_\_id
	.if \_chosen
	mov	x0, #ERRATA_APPLIES
	.else
	mov	x0, #ERRATA_MISSING
	.endif
	ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
622
/*
 * provide a shorthand for the name format for annoying errata
 * body: clobber x0 to x4
 */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func_compat check_erratum_\_cpu\()_\_id
.endm

/* close a check function opened with check_erratum_custom_start */
.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
634
/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Helper to register a cpu with the errata framework. Begins the definition of
 * the reset function.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 */
.macro cpu_reset_prologue _cpu:req
	func_compat \_cpu\()_reset_func
	/* x15 preserves the return address across the reset-time errata */
	mov	x15, x30
	/* x14 carries cpu_rev_var for every reset-time erratum check */
	get_rev_var x14, x0
.endm

/*
 * Wrapper of the reset function to automatically apply all reset-time errata.
 * Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
	/* the func/endfunc macros will change sections. So change the section
	 * back to the reset function's */
	.section .text.asm.\_cpu\()_reset_func, "ax"
.endm

.macro cpu_reset_func_end _cpu:req
	isb
	/* return via x15, saved in cpu_reset_prologue */
	ret	x15
	endfunc_compat \_cpu\()_reset_func
.endm
Boyan Karatotev29fa56d2023-01-27 09:38:15 +0000674
/*
 * Helper macro that enables Maximum Power Mitigation Mechanism (MPMM) on
 * compatible Arm cores.
 *
 * If CPUPPMCR_EL3.MPMMPINCTL is non-zero, MPMM is pin-controlled rather than
 * software-controlled, so the enable bit must not be written.
 *
 * Clobbers x0 and x1 (x1 via sysreg_bit_set).
 */
.macro enable_mpmm
#if ENABLE_MPMM
	mrs	x0, CPUPPMCR_EL3
	/* if CPUPPMCR_EL3.MPMMPINCTL != 0, skip enabling MPMM */
	ands	x0, x0, CPUPPMCR_EL3_MPMMPINCTL_BIT
	b.ne	1f
	/*
	 * The MPMM enable bit lives in CPUMPMMCR_EL3, not in CPUPPMCR_EL3
	 * (which only reports pin control); write the correct register.
	 */
	sysreg_bit_set CPUMPMMCR_EL3, CPUMPMMCR_EL3_MPMM_EN_BIT
	1:
#endif
.endm
691
Antonio Nino Diaz5eb88372018-11-08 10:20:19 +0000692#endif /* CPU_MACROS_S */