/*
 * Copyright (c) 2014-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef CPU_MACROS_S
#define CPU_MACROS_S

#include <assert_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>

/*
 * Write given expressions as quad words
 *
 * _count:
 *      Write at least _count quad words. If the given number of
 *      expressions is less than _count, repeat the last expression to
 *      fill _count quad words in total
 * _rest:
 *      Optional list of expressions. _this is for parameter extraction
 *      only, and has no significance to the caller
 *
 * Invoked as:
 *      fill_constants 2, foo, bar, blah, ...
 */
.macro fill_constants _count:req, _this, _rest:vararg
        .ifgt \_count
        /* Write the current expression */
        .ifb \_this
        .error "Nothing to fill"
        .endif
        .quad \_this

        /* Invoke recursively for remaining expressions */
        .ifnb \_rest
        fill_constants \_count-1, \_rest
        .else
        fill_constants \_count-1, \_this
        .endif
        .endif
.endm
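
/*
 * Illustrative expansion (hypothetical symbol names): invoking
 *
 *      fill_constants 4, foo_core_pwr_dwn, foo_cluster_pwr_dwn
 *
 * emits four quad words: foo_core_pwr_dwn once, then foo_cluster_pwr_dwn
 * repeated three times, padding the list up to _count entries.
 */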

/*
 * Declare CPU operations
 *
 * _name:
 *      Name of the CPU for which operations are being specified
 * _midr:
 *      Numeric value expected to be read from the CPU's MIDR
 * _resetfunc:
 *      Reset function for the CPU. If there's no CPU reset function,
 *      specify CPU_NO_RESET_FUNC
 * _extra1:
 *      This is a placeholder for future per CPU operations. Currently,
 *      some CPUs use this entry to set a test function to determine if
 *      the workaround for CVE-2017-5715 needs to be applied or not.
 * _extra2:
 *      This is a placeholder for future per CPU operations. Currently,
 *      some CPUs use this entry to set a function to disable the
 *      workaround for CVE-2018-3639.
 * _extra3:
 *      This is a placeholder for future per CPU operations. Currently,
 *      some CPUs use this entry to set a test function to determine if
 *      the workaround for CVE-2022-23960 needs to be applied or not.
 * _e_handler:
 *      This is a placeholder for future per CPU exception handlers.
 * _power_down_ops:
 *      Comma-separated list of functions to perform power-down
 *      operations on the CPU. At least one, and up to
 *      CPU_MAX_PWR_DWN_OPS number of functions may be specified.
 *      Starting at power level 0, these functions shall handle power
 *      down at subsequent power levels. If there aren't exactly
 *      CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
 *      used to handle power down at subsequent levels
 */
.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
        _extra1:req, _extra2:req, _extra3:req, _e_handler:req, _power_down_ops:vararg
        .section .cpu_ops, "a"
        .align 3
        .type cpu_ops_\_name, %object
        .quad \_midr
#if defined(IMAGE_AT_EL3)
        .quad \_resetfunc
#endif
        .quad \_extra1
        .quad \_extra2
        .quad \_extra3
        .quad \_e_handler
#ifdef IMAGE_BL31
        /* Insert list of functions */
        fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
        /*
         * It is possible (although unlikely) that a cpu may have no errata in
         * code. In that case the start label will not be defined. The list is
         * intended to be used in a loop, so define it as zero-length for
         * predictable behaviour. Since this macro is always called at the end
         * of the cpu file (after all errata have been parsed) we can be sure
         * that we are at the end of the list. Some cpus call declare_cpu_ops
         * twice, so only do this once.
         */
        .pushsection .rodata.errata_entries
        .ifndef \_name\()_errata_list_start
\_name\()_errata_list_start:
        .endif
        .ifndef \_name\()_errata_list_end
\_name\()_errata_list_end:
        .endif
        .popsection

        /* and now put them in cpu_ops */
        .quad \_name\()_errata_list_start
        .quad \_name\()_errata_list_end

#if REPORT_ERRATA
        .ifndef \_name\()_cpu_str
        /*
         * Place errata reported flag, and the spinlock to arbitrate access to
         * it in the data section.
         */
        .pushsection .data
        define_asm_spinlock \_name\()_errata_lock
\_name\()_errata_reported:
        .word 0
        .popsection

        /* Place CPU string in rodata */
        .pushsection .rodata
\_name\()_cpu_str:
        .asciz "\_name"
        .popsection
        .endif

        /*
         * Mandatory errata status printing function for CPUs of
         * this class.
         */
        .quad \_name\()_errata_report
        .quad \_name\()_cpu_str

#ifdef IMAGE_BL31
        /* Pointers to errata lock and reported flag */
        .quad \_name\()_errata_lock
        .quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
        .quad \_name\()_cpu_reg_dump
#endif
.endm

.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
        _power_down_ops:vararg
        declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, \
                \_power_down_ops
.endm

.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
        _e_handler:req, _power_down_ops:vararg
        declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
                0, 0, 0, \_e_handler, \_power_down_ops
.endm

.macro declare_cpu_ops_wa _name:req, _midr:req, \
        _resetfunc:req, _extra1:req, _extra2:req, \
        _extra3:req, _power_down_ops:vararg
        declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
                \_extra1, \_extra2, \_extra3, 0, \_power_down_ops
.endm
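
/*
 * Illustrative usage, assuming a hypothetical "cortex_foo" CPU file that
 * defines CORTEX_FOO_MIDR, a reset function and two power-down handlers:
 *
 *      declare_cpu_ops cortex_foo, CORTEX_FOO_MIDR, cortex_foo_reset_func, \
 *              cortex_foo_core_pwr_dwn, \
 *              cortex_foo_cluster_pwr_dwn
 *
 * The power-down list is padded to CPU_MAX_PWR_DWN_OPS entries by repeating
 * the last function (see fill_constants above).
 */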

/* TODO can be deleted once all CPUs have been converted */
#if REPORT_ERRATA
/*
 * Print the status of a CPU erratum
 *
 * _chosen:
 *      Identifier indicating whether or not the erratum workaround has
 *      been compiled in.
 * _cpu:
 *      Name of the CPU
 * _id:
 *      Erratum identifier
 * _rev_var:
 *      Register containing the combined value of the CPU revision and
 *      variant - typically the return value of cpu_get_rev_var
 */
.macro report_errata _chosen, _cpu, _id, _rev_var=x8
        /* Stash a string with the erratum ID */
        .pushsection .rodata
\_cpu\()_errata_\_id\()_str:
        .asciz "\_id"
        .popsection

        /* Check whether the erratum applies */
        mov x0, \_rev_var
        /* Shall clobber: x0-x7 */
        bl check_errata_\_id

        .ifeq \_chosen
        /*
         * The erratum workaround has not been compiled in. If the erratum
         * would have applied had it been compiled in, print its status as
         * missing.
         */
        cbz x0, 900f
        mov x0, #ERRATA_MISSING
        .endif
900:
        adr x1, \_cpu\()_cpu_str
        adr x2, \_cpu\()_errata_\_id\()_str
        bl errata_print_msg
.endm
#endif
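
/*
 * Illustrative usage inside a legacy per-CPU errata report function, assuming
 * a hypothetical erratum 123456 on "cortex_foo" guarded by the
 * ERRATA_FOO_123456 build flag and a legacy check_errata_123456 function
 * (x8 is the default _rev_var register):
 *
 *      bl cpu_get_rev_var
 *      mov x8, x0
 *      report_errata ERRATA_FOO_123456, cortex_foo, 123456
 */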

/*
 * This macro is used on some CPUs to detect if they are vulnerable
 * to CVE-2017-5715.
 */
.macro cpu_check_csv2 _reg _label
        mrs \_reg, id_aa64pfr0_el1
        ubfx \_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
        /*
         * If the field equals 1, branch targets trained in one context cannot
         * affect speculative execution in a different context.
         *
         * If the field equals 2, it means that the system is also aware of
         * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
         * expect users of the registers to do the right thing.
         *
         * Only apply mitigations if the value of this field is 0.
         */
#if ENABLE_ASSERTIONS
        cmp \_reg, #3 /* Only values 0 to 2 are expected */
        ASM_ASSERT(lo)
#endif

        cmp \_reg, #0
        bne \_label
.endm
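
/*
 * Illustrative usage in a reset function (hypothetical local label): skip the
 * CVE-2017-5715 mitigation when the CSV2 field reports it is unnecessary.
 *
 *      cpu_check_csv2 x0, 1f
 *      ... install the vector-based mitigation ...
 * 1:
 */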

/*
 * Helper macro that reads the part number of the current
 * CPU and jumps to the given label if it matches the CPU
 * MIDR provided.
 *
 * Clobbers x0.
 */
.macro jump_if_cpu_midr _cpu_midr, _label
        mrs x0, midr_el1
        ubfx x0, x0, MIDR_PN_SHIFT, #12
        cmp w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
        b.eq \_label
.endm
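
/*
 * Illustrative usage in generic code (hypothetical MIDR constant): only run a
 * CPU-specific sequence when executing on that part.
 *
 *      jump_if_cpu_midr CORTEX_FOO_MIDR, 1f
 *      b 2f
 * 1:   ... CPU-specific handling ...
 * 2:
 */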

/*
 * Add an entry for an erratum to the errata framework. Used by the workaround
 * wrappers below; errata that apply at reset are applied automatically by the
 * CPU reset function wrapper.
 *
 * _cpu:
 *      Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *      Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *      Erratum or CVE number. Please combine with previous field with ERRATUM
 *      or CVE macros
 *
 * _chosen:
 *      Compile time flag on whether the erratum is included
 *
 * _apply_at_reset:
 *      Whether the erratum should be automatically applied at reset
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
        .pushsection .rodata.errata_entries
        .align 3
        .ifndef \_cpu\()_errata_list_start
\_cpu\()_errata_list_start:
        .endif

        /* check if unused and compile out if no references */
        .if \_apply_at_reset && \_chosen
        .quad erratum_\_cpu\()_\_id\()_wa
        .else
        .quad 0
        .endif
        /* TODO(errata ABI): this prevents all checker functions from
         * being optimised away. Can be done away with unless the ABI
         * needs them */
        .quad check_erratum_\_cpu\()_\_id
        /* Will fit CVEs with up to 10 characters in the ID field */
        .word \_id
        .hword \_cve
        .byte \_chosen
        /* TODO(errata ABI): mitigated field for known but unmitigated
         * errata */
        .byte 0x1
        .popsection
.endm

.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
        add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset

        func erratum_\_cpu\()_\_id\()_wa
        mov x8, x30

        /* save rev_var for workarounds that might need it but don't
         * restore it to x0 because few will care */
        mov x7, x0
        bl check_erratum_\_cpu\()_\_id
        cbz x0, erratum_\_cpu\()_\_id\()_skip
.endm

.macro _workaround_end _cpu:req, _id:req
erratum_\_cpu\()_\_id\()_skip:
        ret x8
        endfunc erratum_\_cpu\()_\_id\()_wa
.endm

/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *      Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *      Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *      Erratum or CVE number. Please combine with previous field with ERRATUM
 *      or CVE macros
 *
 * _chosen:
 *      Compile time flag on whether the erratum is included
 *
 * in body:
 *      clobber x0 to x7 (please only use those)
 *      argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
        _workaround_start \_cpu, \_cve, \_id, \_chosen, 1
.endm

/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *      Check if the CPU's MIDR matches the CPU it's meant for. Must be
 *      specified for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
        /*
         * Let errata specify if they need MIDR checking. Sadly, storing the
         * MIDR in an .equ to retrieve it automatically blows up as it stores
         * some brackets in the symbol
         */
        .ifnb \_midr
                jump_if_cpu_midr \_midr, 1f
                b erratum_\_cpu\()_\_id\()_skip

1:
        .endif
        _workaround_start \_cpu, \_cve, \_id, \_chosen, 0
.endm

/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so that the same #define can be used as for that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
        _workaround_end \_cpu, \_id
.endm

/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so that the same #define can be used as for that macro. Additional arguments:
 *
 * _no_isb:
 *      Optionally do not include the trailing isb. Please disable with the
 *      NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
        /*
         * Runtime errata do not have a reset function to execute the isb for
         * them, and missing the isb could be very problematic. It is also
         * likely to be missed, as runtime errata tend to be scattered in
         * generic code.
         */
        .ifb \_no_isb
        isb
        .endif
        _workaround_end \_cpu, \_id
.endm
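
/*
 * Illustrative shape of a runtime workaround, assuming a hypothetical
 * CVE-2099-12345 affecting "cortex_foo", guarded by WORKAROUND_CVE_2099_12345:
 *
 *      workaround_runtime_start cortex_foo, CVE(2099, 12345), \
 *              WORKAROUND_CVE_2099_12345, CORTEX_FOO_MIDR
 *      ... workaround body, may clobber x0 to x7 ...
 *      workaround_runtime_end cortex_foo, CVE(2099, 12345)
 *
 * A matching check_erratum_cortex_foo_12345 function must also exist (see the
 * check_erratum_* helpers below), and the generated erratum_cortex_foo_12345_wa
 * function can then be invoked with apply_erratum.
 */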

/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *      Register to write to
 *
 * _bit:
 *      Bit to set. Please use a descriptive #define
 *
 * _assert:
 *      Optionally whether to read back and assert that the bit has been
 *      written. Please disable with NO_ASSERT macro
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
        mrs x1, \_reg
        orr x1, x1, #\_bit
        msr \_reg, x1
.endm
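
/*
 * Illustrative usage (hypothetical implementation defined register and bit
 * define); note the bit value must be encodable as an ORR immediate:
 *
 *      sysreg_bit_set CORTEX_FOO_CPUACTLR_EL1, CORTEX_FOO_CPUACTLR_EL1_BIT_22
 */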

/*
 * Apply erratum
 *
 * _cpu:
 *      Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *      Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *      Erratum or CVE number. Please combine with previous field with ERRATUM
 *      or CVE macros
 *
 * _chosen:
 *      Compile time flag on whether the erratum is included
 *
 * clobbers: x0-x9 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req
        .if \_chosen
        mov x9, x30
        bl cpu_get_rev_var
        bl erratum_\_cpu\()_\_id\()_wa
        mov x30, x9
        .endif
.endm
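
/*
 * Illustrative usage, e.g. from a power-down hook, assuming a hypothetical
 * erratum 123456 on "cortex_foo" guarded by ERRATA_FOO_123456:
 *
 *      apply_erratum cortex_foo, ERRATUM(123456), ERRATA_FOO_123456
 *
 * When the build flag is 0, the macro expands to nothing.
 */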

/*
 * Helpers to select which revisions errata apply to. They tail-call the
 * cpu_rev_var_*** helpers, which perform the ret, so there is no need to set
 * up a link register and we save an instruction.
 *
 * _cpu:
 *      Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *      Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *      Erratum or CVE number. Please combine with previous field with ERRATUM
 *      or CVE macros
 *
 * _rev_num:
 *      Revision to apply to
 *
 * in body:
 *      clobber: x0 to x4
 *      argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
        func check_erratum_\_cpu\()_\_id
        mov x1, #\_rev_num
        b cpu_rev_var_ls
        endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
        func check_erratum_\_cpu\()_\_id
        mov x1, #\_rev_num
        b cpu_rev_var_hs
        endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
        func check_erratum_\_cpu\()_\_id
        mov x1, #\_rev_num_lo
        mov x2, #\_rev_num_hi
        b cpu_rev_var_range
        endfunc check_erratum_\_cpu\()_\_id
.endm
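
/*
 * Illustrative pairing of a reset-time workaround with its checker, assuming
 * a hypothetical erratum 123456 on "cortex_foo" that applies up to r1p1 and
 * is guarded by ERRATA_FOO_123456:
 *
 *      workaround_reset_start cortex_foo, ERRATUM(123456), ERRATA_FOO_123456
 *      sysreg_bit_set CORTEX_FOO_CPUACTLR_EL1, CORTEX_FOO_CPUACTLR_EL1_BIT_22
 *      workaround_reset_end cortex_foo, ERRATUM(123456)
 *
 *      check_erratum_ls cortex_foo, ERRATUM(123456), CPU_REV(1, 1)
 */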

/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Wrapper to automatically apply all reset-time errata. Will end with an isb.
 *
 * _cpu:
 *      Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *      clobber x8 to x14
 *      argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
        func \_cpu\()_reset_func
        mov x15, x30
        bl cpu_get_rev_var
        mov x14, x0

        /* go straight to this cpu's errata list to avoid searching for it */
        adrp x12, \_cpu\()_errata_list_start
        add x12, x12, :lo12:\_cpu\()_errata_list_start
        adrp x13, \_cpu\()_errata_list_end
        add x13, x13, :lo12:\_cpu\()_errata_list_end

errata_begin:
        /* if head catches up with end of list, exit */
        cmp x12, x13
        b.eq errata_end

        ldr x10, [x12, #ERRATUM_WA_FUNC]
        /* TODO(errata ABI): check mitigated and checker function fields
         * for 0 */
        ldrb w11, [x12, #ERRATUM_CHOSEN]

        /* skip if not chosen */
        cbz x11, 1f
        /* skip if runtime erratum */
        cbz x10, 1f

        /* put cpu revision in x0 and call workaround */
        mov x0, x14
        blr x10
1:
        add x12, x12, #ERRATUM_ENTRY_SIZE
        b errata_begin
errata_end:
.endm

.macro cpu_reset_func_end _cpu:req
        isb
        ret x15
        endfunc \_cpu\()_reset_func
.endm
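
/*
 * Illustrative reset function built with the wrappers, for a hypothetical
 * "cortex_foo" CPU. Reset-time errata declared with workaround_reset_start
 * above it are invoked automatically by cpu_reset_func_start:
 *
 *      cpu_reset_func_start cortex_foo
 *      ... manual reset-time setup, may clobber x8 to x14 (x14 = cpu_rev_var) ...
 *      cpu_reset_func_end cortex_foo
 */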

/*
 * Maintain compatibility with the old scheme where each cpu has its own
 * reporting function. TODO: remove entirely once all cpus have been converted.
 * This includes the cpu_ops entry, as print_errata_status can call this
 * directly for all cpus
 */
.macro errata_report_shim _cpu:req
#if REPORT_ERRATA
        func \_cpu\()_errata_report
        /* normal stack frame for pretty debugging */
        stp x29, x30, [sp, #-16]!
        mov x29, sp

        bl generic_errata_report

        ldp x29, x30, [sp], #16
        ret
        endfunc \_cpu\()_errata_report
#endif
.endm
#endif /* CPU_MACROS_S */