blob: 5d2bb7bcc5d84f220141bec8ad8c09fb90e168fa [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Boyan Karatotev6c473862025-01-21 11:41:46 +00002 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Achin Gupta4f6ad662013-10-25 09:08:21 +01005 */
Antonio Nino Diaz5eb88372018-11-08 10:20:19 +00006#ifndef CPU_MACROS_S
7#define CPU_MACROS_S
Achin Gupta4f6ad662013-10-25 09:08:21 +01008
Antonio Nino Diaza9044872019-02-12 11:25:02 +00009#include <assert_macros.S>
Boyan Karatoteve7d7c272023-01-25 16:55:18 +000010#include <lib/cpus/cpu_ops.h>
Boyan Karatotev5d38cb32023-01-27 09:37:07 +000011#include <lib/cpus/errata.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010012
	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	  .ifgt \_count
	  /* Write the current expression */
	  .ifb \_this
	  .error "Nothing to fill"
	  .endif
	  .quad \_this

	  /*
	   * Invoke recursively for remaining expressions; when the list is
	   * exhausted, keep repeating the last expression until _count quads
	   * have been emitted.
	   */
	  .ifnb \_rest
	  fill_constants \_count-1, \_rest
	  .else
	  fill_constants \_count-1, \_this
	  .endif
	  .endif
	.endm
43
	/*
	 * Declare CPU operations
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to read from CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 *
	 * NOTE(review): the field order emitted below presumably mirrors the
	 * C-side cpu_ops struct layout (lib/cpus/cpu_ops.h) — keep both in
	 * sync when changing either.
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	.quad \_resetfunc
#endif
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
	\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
	\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	.ifndef \_name\()_cpu_str
	/*
	 * Place errata reported flag, and the spinlock to arbitrate access to
	 * it in the data section.
	 */
	.pushsection .data
	define_asm_spinlock \_name\()_errata_lock
	\_name\()_errata_reported:
	.word 0
	.popsection

	/* Place CPU string in rodata */
	.pushsection .rodata
	\_name\()_cpu_str:
	.asciz "\_name"
	.popsection
	.endif

	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	.quad \_name\()_cpu_reg_dump
#endif
	.endm
Dan Handleyea596682015-04-01 17:34:24 +0100132
	/* Convenience wrapper: declare CPU ops with no exception handler (0). */
	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, \_power_down_ops
	.endm
137
	/* Convenience wrapper: declare CPU ops with a per-CPU exception handler. */
	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_e_handler, \_power_down_ops
	.endm
143
	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 *
	 * Branches to \_label when CSV2 is non-zero (i.e. no mitigation
	 * needed). Clobbers \_reg and the condition flags.
	 */
	.macro cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #3 /* Only values 0 to 2 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm
Deepak Pandeyb5615362018-10-11 13:44:43 +0530169
170 /*
171 * Helper macro that reads the part number of the current
172 * CPU and jumps to the given label if it matches the CPU
173 * MIDR provided.
174 *
175 * Clobbers x0.
176 */
177 .macro jump_if_cpu_midr _cpu_midr, _label
178 mrs x0, midr_el1
179 ubfx x0, x0, MIDR_PN_SHIFT, #12
180 cmp w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
181 b.eq \_label
182 .endm
Antonio Nino Diaz5eb88372018-11-08 10:20:19 +0000183
Boyan Karatotev821364e2023-01-27 09:35:10 +0000184
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _split_wa:
 *	Flag that indicates whether an erratum has split workaround or not.
 *	Default value is 0.
 *
 * NOTE(review): the .quad/.word/.hword/.byte layout below presumably matches
 * the errata_entry C struct (lib/cpus/errata.h) — verify before reordering.
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _split_wa=0
#if INCLUDE_ERRATA_LIST
	.pushsection .rodata.errata_entries
		.align	3
		.ifndef \_cpu\()_errata_list_start
		\_cpu\()_errata_list_start:
		.endif

		.quad	check_erratum_\_cpu\()_\_id
		/* Will fit CVEs with up to 10 character in the ID field */
		.word	\_id
		.hword	\_cve
		/* bit magic that appends chosen field based on _split_wa */
		.byte	((\_chosen * 0b11) & ((\_split_wa << 1) | \_chosen))
		.byte	0x0 /* alignment */
	.popsection
#endif
.endm
224
/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _split_wa:
 *	Flag that indicates whether an erratum has split workaround or not.
 *	Default value is 0.
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, \
	_chosen:req, _split_wa=0

	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_split_wa

	.if \_chosen
	/* put errata directly into the reset function */
	.pushsection .text.asm.\_cpu\()_reset_func, "ax"
	.else
	/* or something else that will get garbage collected by the
	 * linker */
	.pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax"
	.endif
	/* revision is stored in x14, get it */
	mov	x0, x14
	bl	check_erratum_\_cpu\()_\_id
	/* save rev_var for workarounds that might need it */
	mov	x7, x14
	/* check routine returns 0 when the erratum does not apply */
	cbz	x0, erratum_\_cpu\()_\_id\()_skip_reset
.endm
275
/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen

	func erratum_\_cpu\()_\_id\()_wa
	/* save the link register; the epilogue returns via ret x8 */
	mov	x8, x30
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ to retrieve automatically blows up as it stores some
	 * brackets in the symbol
	 */
	.ifnb \_midr
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip_runtime

	1:
	.endif
	/* save rev_var for workarounds that might need it but don't
	 * restore to x0 because few will care */
	mov	x7, x0
	bl	check_erratum_\_cpu\()_\_id
	cbz	x0, erratum_\_cpu\()_\_id\()_skip_runtime
.endm
305
/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	/* label targeted by the cbz in workaround_reset_start */
	erratum_\_cpu\()_\_id\()_skip_reset:
	.popsection
.endm
314
/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to call the isb for them
	 * and missing the isb could be very problematic. It is also likely as
	 * they tend to be scattered in generic code.
	 */
	.ifb \_no_isb
	isb
	.endif
	erratum_\_cpu\()_\_id\()_skip_runtime:
	/* return via the link register saved in workaround_runtime_start */
	ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm
336
/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *	NOTE(review): _assert is accepted but not referenced in this body —
 *	the read-back assert is not implemented here.
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm
361
/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 * the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 *
 * clobbers: x1
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm
373
/*
 * Toggle a bit in a system register. Can toggle multiple bits but is limited by
 * the way the EOR instruction encodes them.
 *
 * see sysreg_bit_set for usage
 * NOTE(review): _assert is accepted but not referenced in this body.
 *
 * clobbers: x1
 */
.macro sysreg_bit_toggle _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	eor	x1, x1, #\_bit
	msr	\_reg, x1
.endm
385
/* Install \_table as the EL3 vector base. Clobbers x1. */
.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm
390
/*
 * BFI : Inserts bitfield into a system register.
 *
 * BFI{cond} Rd, Rn, #lsb, #width
 *
 * _src is an immediate value; see sysreg_bitfield_insert_from_gpr for a
 * register source. clobbers: x0, x1
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
403
/*
 * Same as sysreg_bitfield_insert but the source value comes from a general
 * purpose register instead of an immediate. clobbers: x0, x1
 */
.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
	/* Source value in register for BFI */
	mov	x1, \_gpr
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
411
/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 *
 * _res:
 *	register where the result will be placed
 * _tmp:
 *	register to clobber for temporaries
 */
.macro get_rev_var _res:req, _tmp:req
	mrs	\_tmp, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	\_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	\_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
.endm
434
/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU revision fetching
 *	procedure. Stores the result of this in the temporary register x10 to allow for chaining
 *	(a subsequent apply_erratum with _get_rev disabled reuses x10).
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen && \_get_rev)
	mov	x9, x30
	bl	cpu_get_rev_var
	/* cache rev_var in x10 so chained invocations can skip the call */
	mov	x10, x0
	.elseif (\_chosen)
	mov	x9, x30
	/* reuse the rev_var cached by a previous apply_erratum */
	mov	x0, x10
	.endif

	.if \_chosen
	bl	erratum_\_cpu\()_\_id\()_wa
	mov	x30, x9
	.endif
.endm
472
/*
 * Helpers to report if an erratum applies. Compares the given revision variant
 * to the given value. Return ERRATA_APPLIES or ERRATA_NOT_APPLIES accordingly.
 *
 * _rev_num: the given revision variant. Or
 * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
 *
 * in body:
 *	clobber: x0
 *	argument: x0 - cpu_rev_var
 */
.macro cpu_rev_var_ls _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, ls
.endm

.macro cpu_rev_var_hs _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, hs
.endm

/* Inclusive range check: _rev_num_lo <= x0 <= _rev_num_hi. Also clobbers x1. */
.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
	cmp	x0, #\_rev_num_lo
	mov	x1, #\_rev_num_hi
	ccmp	x0, x1, #2, hs
	cset	x0, ls
.endm
500

#if __clang_major__ < 17
/*
 * A problem with clang version < 17 can cause resolving nested
 * 'cfi_startproc' to fail compilation.
 * So add a compatibility variant for start and endfunc expansions
 * to ignore `cfi_startproc` and `cfi_endproc`, this to be used only with
 * check_errata/reset macros if we build TF-A with clang version < 17
 */

.macro func_compat _name, _align=2
	.section .text.asm.\_name, "ax"
	.type \_name, %function
	.align \_align
	\_name:
#if ENABLE_BTI
	/* landing pad for indirect calls/jumps when BTI is enabled */
	bti	jc
#endif
.endm

/*
 * This macro is used to mark the end of a function.
 */
.macro endfunc_compat _name
	.size \_name, . - \_name
.endm

#else

#define func_compat func
#define endfunc_compat endfunc

#endif /* __clang_major__ < 17 */
534
/*
 * Helpers to select which revisions errata apply to.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x1
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func_compat check_erratum_\_cpu\()_\_id
	cpu_rev_var_ls \_rev_num
	ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func_compat check_erratum_\_cpu\()_\_id
	cpu_rev_var_hs \_rev_num
	ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func_compat check_erratum_\_cpu\()_\_id
	cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
	ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
575
/*
 * Check function for errata whose applicability is decided purely at build
 * time: returns ERRATA_APPLIES when compiled in, ERRATA_MISSING otherwise.
 */
.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func_compat check_erratum_\_cpu\()_\_id
	.if \_chosen
	mov	x0, #ERRATA_APPLIES
	.else
	mov	x0, #ERRATA_MISSING
	.endif
	ret
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
586
/*
 * provide a shorthand for the name format for annoying errata
 * body: clobber x0 to x4
 */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func_compat check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc_compat check_erratum_\_cpu\()_\_id
.endm
598
/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Helper to register a cpu with the errata framework. Begins the definition of
 * the reset function.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * Saves the link register in x15 and the packed revision/variant in x14;
 * cpu_reset_func_end returns via `ret x15` and reset-time workarounds read
 * the revision from x14.
 */
.macro cpu_reset_prologue _cpu:req
	func_compat \_cpu\()_reset_func
	mov	x15, x30
	get_rev_var x14, x0
.endm
615
/*
 * Wrapper of the reset function to automatically apply all reset-time errata.
 * Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
	/* the func/endfunc macros will change sections. So change the section
	 * back to the reset function's */
	.section .text.asm.\_cpu\()_reset_func, "ax"
.endm
632
/* Close the reset function: synchronise and return via x15 (saved LR). */
.macro cpu_reset_func_end _cpu:req
	isb
	ret	x15
	endfunc_compat \_cpu\()_reset_func
.endm
Boyan Karatotev29fa56d2023-01-27 09:38:15 +0000638
/*
 * Helper macro that enables Maximum Power Mitigation Mechanism (MPMM) on
 * compatible Arm cores.
 *
 * Clobbers x0 (and x1 via sysreg_bit_set) and the condition flags.
 */
.macro enable_mpmm
#if ENABLE_MPMM
	mrs	x0, CPUPPMCR_EL3
	/* if CPUPPMCR_EL3.MPMMPINCTL != 0, skip enabling MPMM */
	ands	x0, x0, CPUPPMCR_EL3_MPMMPINCTL_BIT
	b.ne	1f
	/*
	 * The enable bit lives in CPUMPMMCR_EL3 (as the bit name indicates);
	 * CPUPPMCR_EL3 is only read above for the pin-control check. The
	 * previous code wrote the bit into CPUPPMCR_EL3 by mistake.
	 */
	sysreg_bit_set CPUMPMMCR_EL3, CPUMPMMCR_EL3_MPMM_EN_BIT
	1:
#endif
.endm
655
Antonio Nino Diaz5eb88372018-11-08 10:20:19 +0000656#endif /* CPU_MACROS_S */