perf(cpus): make reset errata do fewer branches

Errata application is painful for performance. For a start, it is done
when the core has just come out of reset, so branch predictors and
caches are empty and a branch to a workaround function must be fetched
from memory; that round trip is very slow. It then also runs with the
I-cache off, so the loop that iterates over the workarounds must itself
be fetched from memory on each iteration.

We can remove both sources of branching. First, we can simply apply
every erratum directly instead of defining a workaround function and
jumping to it. Currently, no erratum needs to be applied at both reset
and runtime with the same workaround function. Should the need arise in
future, it could be met with a combined reset + runtime wrapper.
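
For illustration, a chosen reset-time erratum now expands roughly as
follows (the cpu name and erratum id are placeholders; the real
expansion comes from workaround_reset_start/_end below):

    .pushsection .text.asm.cortex_a710_reset_func, "ax"
    /* cpu_rev_var is kept in x14 by the reset function prologue */
    mov	x0, x14
    bl	check_erratum_cortex_a710_2058056
    /* rev_var is also left in x7 for workarounds that need it */
    mov	x7, x14
    cbz	x0, erratum_cortex_a710_2058056_skip_reset
    /* ... workaround body, straight-line code ... */
    erratum_cortex_a710_2058056_skip_reset:
    .popsection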

Then, we can construct a function that applies each erratum linearly
instead of looping over the list. If this function is part of the reset
function, the only "far" branches at reset will be the calls to the
checker functions. Importantly, this mitigates the slowdown even when
an erratum is disabled, as disabled errata no longer cost a loop
iteration.
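
In a cpu file the new macros compose roughly like so (cpu and erratum
names are again placeholders; ERRATUM() and the _chosen flag follow the
existing framework conventions):

    cpu_reset_prologue cortex_a710

    workaround_reset_start cortex_a710, ERRATUM(2058056), ERRATA_A710_2058056
    	/* workaround body, emitted directly into the reset function */
    workaround_reset_end cortex_a710, ERRATUM(2058056)

    cpu_reset_func_start cortex_a710
    	/* remaining reset-time initialisation */
    cpu_reset_func_end cortex_a710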

The result is a ~50% speedup on N1SDP and a ~20% speedup on AArch64
Juno on wakeup from PSCI calls that end in powerdown. This is roughly
back to the v2.9 baseline, from before the errata framework regressed
performance (or a little better). Note that other slowdowns have crept
in since then and remain unaccounted for.
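
For contrast, runtime workarounds keep the callable wrapper. With the
macros below, workaround_runtime_start/_end expand to roughly (same
placeholder names):

    func erratum_cortex_a710_2058056_wa
    	mov	x8, x30
    	/* rev_var arrives in x0; stash it in x7 for the workaround */
    	mov	x7, x0
    	bl	check_erratum_cortex_a710_2058056
    	cbz	x0, erratum_cortex_a710_2058056_skip_runtime
    	/* ... workaround body ... */
    	isb
    erratum_cortex_a710_2058056_skip_runtime:
    	ret	x8
    endfunc erratum_cortex_a710_2058056_wa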

Change-Id: Ie4d5288a331b11fd648e5c4a0b652b74160b07b9
Signed-off-by: Boyan Karatotev <boyan.karatotev@arm.com>

diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S
index a878a5f..31f8811 100644
--- a/include/lib/cpus/aarch32/cpu_macros.S
+++ b/include/lib/cpus/aarch32/cpu_macros.S
@@ -172,11 +172,6 @@
\_cpu\()_errata_list_start:
	.endif

- /* unused on AArch32, maintain for portability */
- .word 0
- /* TODO(errata ABI): this prevents all checker functions from
- * being optimised away. Can be done away with unless the ABI
- * needs them */
.ifnb \_special
.word check_errata_\_special
.elseif \_cve
@@ -188,9 +183,7 @@
.word \_id
.hword \_cve
.byte \_chosen
- /* TODO(errata ABI): mitigated field for known but unmitigated
- * errata*/
- .byte 0x1
+ .byte 0x0 /* alignment */
.popsection
.endm

diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index c8f4bde..f3df595 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -238,52 +238,24 @@
* _apply_at_reset:
* Whether the erratum should be automatically applied at reset
*/
-.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
+.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req
+#if REPORT_ERRATA || ERRATA_ABI_SUPPORT
.pushsection .rodata.errata_entries
.align 3
.ifndef \_cpu\()_errata_list_start
\_cpu\()_errata_list_start:
	.endif

- /* check if unused and compile out if no references */
- .if \_apply_at_reset && \_chosen
- .quad erratum_\_cpu\()_\_id\()_wa
- .else
- .quad 0
- .endif
- /* TODO(errata ABI): this prevents all checker functions from
- * being optimised away. Can be done away with unless the ABI
- * needs them */
.quad check_erratum_\_cpu\()_\_id
	/* Will fit CVEs with up to 10 characters in the ID field */
.word \_id
.hword \_cve
.byte \_chosen
- /* TODO(errata ABI): mitigated field for known but unmitigated
- * errata */
- .byte 0x1
+ .byte 0x0 /* alignment */
.popsection
+#endif
.endm

-.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
- add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset
-
- func erratum_\_cpu\()_\_id\()_wa
- mov x8, x30
-
- /* save rev_var for workarounds that might need it but don't
- * restore to x0 because few will care */
- mov x7, x0
- bl check_erratum_\_cpu\()_\_id
- cbz x0, erratum_\_cpu\()_\_id\()_skip
-.endm
-
-.macro _workaround_end _cpu:req, _id:req
- erratum_\_cpu\()_\_id\()_skip:
- ret x8
- endfunc erratum_\_cpu\()_\_id\()_wa
-.endm
-
/*******************************************************************************
* Errata workaround wrappers
******************************************************************************/
@@ -311,7 +283,22 @@
* _wa clobbers: x0-x8 (PCS compliant)
*/
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
- _workaround_start \_cpu, \_cve, \_id, \_chosen, 1
+ add_erratum_entry \_cpu, \_cve, \_id, \_chosen
+
+ .if \_chosen
+ /* put errata directly into the reset function */
+ .pushsection .text.asm.\_cpu\()_reset_func, "ax"
+ .else
+	/* or into a section the linker will garbage collect, as
+	 * nothing references it */
+ .pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax"
+ .endif
+	/* the cpu revision is kept in x14; the checker expects it in x0 */
+	mov	x0, x14
+	bl	check_erratum_\_cpu\()_\_id
+	/* also stash rev_var in x7 for workarounds that might need it */
+	mov	x7, x14
+	cbz	x0, erratum_\_cpu\()_\_id\()_skip_reset
.endm

/*
@@ -322,6 +309,10 @@
* for errata applied in generic code
*/
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
+ add_erratum_entry \_cpu, \_cve, \_id, \_chosen
+
+ func erratum_\_cpu\()_\_id\()_wa
+ mov x8, x30
/*
* Let errata specify if they need MIDR checking. Sadly, storing the
* MIDR in an .equ to retrieve automatically blows up as it stores some
@@ -329,11 +320,15 @@
*/
.ifnb \_midr
jump_if_cpu_midr \_midr, 1f
- b erratum_\_cpu\()_\_id\()_skip
+ b erratum_\_cpu\()_\_id\()_skip_runtime
1:
	.endif

- _workaround_start \_cpu, \_cve, \_id, \_chosen, 0
+	/* stash rev_var in x7 for workarounds that might need it; it is
+	 * not restored to x0 after the check, as few workarounds care */
+	mov	x7, x0
+ bl check_erratum_\_cpu\()_\_id
+ cbz x0, erratum_\_cpu\()_\_id\()_skip_runtime
.endm

/*
@@ -341,7 +336,8 @@
* is kept here so the same #define can be used as that macro
*/
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
- _workaround_end \_cpu, \_id
+ erratum_\_cpu\()_\_id\()_skip_reset:
+ .popsection
.endm

/*
@@ -361,7 +357,9 @@
.ifb \_no_isb
isb
.endif
- _workaround_end \_cpu, \_id
+ erratum_\_cpu\()_\_id\()_skip_runtime:
+ ret x8
+ endfunc erratum_\_cpu\()_\_id\()_wa
.endm

/*******************************************************************************
@@ -598,48 +596,33 @@
******************************************************************************/

/*
- * Wrapper to automatically apply all reset-time errata. Will end with an isb.
+ * Helper to register a cpu with the errata framework. Begins the definition of
+ * the reset function.
*
* _cpu:
* Name of cpu as given to declare_cpu_ops
+ */
+.macro cpu_reset_prologue _cpu:req
+ func \_cpu\()_reset_func
+ mov x15, x30
+ get_rev_var x14, x0
+.endm
+
+/*
+ * Wrapper around the reset function that automatically applies all
+ * reset-time errata. It will end with an isb.
+ *
+ * _cpu:
+ * Name of cpu as given to declare_cpu_ops
*
* in body:
* clobber x8 to x14
* argument x14 - cpu_rev_var
*/
.macro cpu_reset_func_start _cpu:req
- func \_cpu\()_reset_func
- mov x15, x30
- get_rev_var x14, x0
-
- /* short circuit the location to avoid searching the list */
- adrp x12, \_cpu\()_errata_list_start
- add x12, x12, :lo12:\_cpu\()_errata_list_start
- adrp x13, \_cpu\()_errata_list_end
- add x13, x13, :lo12:\_cpu\()_errata_list_end
-
- errata_begin:
- /* if head catches up with end of list, exit */
- cmp x12, x13
- b.eq errata_end
-
- ldr x10, [x12, #ERRATUM_WA_FUNC]
- /* TODO(errata ABI): check mitigated and checker function fields
- * for 0 */
- ldrb w11, [x12, #ERRATUM_CHOSEN]
-
- /* skip if not chosen */
- cbz x11, 1f
- /* skip if runtime erratum */
- cbz x10, 1f
-
- /* put cpu revision in x0 and call workaround */
- mov x0, x14
- blr x10
- 1:
- add x12, x12, #ERRATUM_ENTRY_SIZE
- b errata_begin
- errata_end:
+	/* the func/endfunc macros change sections, so switch back to the
+	 * reset function's section */
+ .section .text.asm.\_cpu\()_reset_func, "ax"
.endm

.macro cpu_reset_func_end _cpu:req
diff --git a/include/lib/cpus/errata.h b/include/lib/cpus/errata.h
index b9166f7..10b949f 100644
--- a/include/lib/cpus/errata.h
+++ b/include/lib/cpus/errata.h
@@ -9,20 +9,18 @@

#include <lib/cpus/cpu_ops.h>

-#define ERRATUM_WA_FUNC_SIZE CPU_WORD_SIZE
#define ERRATUM_CHECK_FUNC_SIZE CPU_WORD_SIZE
#define ERRATUM_ID_SIZE 4
#define ERRATUM_CVE_SIZE 2
#define ERRATUM_CHOSEN_SIZE 1
-#define ERRATUM_MITIGATED_SIZE 1
+#define ERRATUM_ALIGNMENT_SIZE	1

-#define ERRATUM_WA_FUNC 0
-#define ERRATUM_CHECK_FUNC ERRATUM_WA_FUNC + ERRATUM_WA_FUNC_SIZE
+#define ERRATUM_CHECK_FUNC 0
#define ERRATUM_ID ERRATUM_CHECK_FUNC + ERRATUM_CHECK_FUNC_SIZE
#define ERRATUM_CVE ERRATUM_ID + ERRATUM_ID_SIZE
#define ERRATUM_CHOSEN ERRATUM_CVE + ERRATUM_CVE_SIZE
-#define ERRATUM_MITIGATED ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE
-#define ERRATUM_ENTRY_SIZE ERRATUM_MITIGATED + ERRATUM_MITIGATED_SIZE
+#define ERRATUM_ALIGNMENT ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE
+#define ERRATUM_ENTRY_SIZE	ERRATUM_ALIGNMENT + ERRATUM_ALIGNMENT_SIZE

/* Errata status */
#define ERRATA_NOT_APPLIES 0
@@ -39,15 +37,13 @@
* uintptr_t will reflect the change and the alignment will be correct in both.
*/
struct erratum_entry {
- uintptr_t (*wa_func)(uint64_t cpu_rev);
uintptr_t (*check_func)(uint64_t cpu_rev);
	/* Will fit CVEs with up to 10 characters in the ID field */
uint32_t id;
/* Denote CVEs with their year or errata with 0 */
uint16_t cve;
uint8_t chosen;
- /* TODO(errata ABI): placeholder for the mitigated field */
- uint8_t _mitigated;
+ uint8_t _alignment;
} __packed;

CASSERT(sizeof(struct erratum_entry) == ERRATUM_ENTRY_SIZE,