/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
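	/*
	 * For illustration only, the flow below is roughly equivalent to the
	 * following C sketch (the field name reset_func is illustrative; the
	 * real layout is the one generated by cpu_macros.S, accessed via the
	 * CPU_RESET_FUNC offset):
	 *
	 *	struct cpu_ops *ops;
	 *
	 *	plat_reset_handler();
	 *	ops = get_cpu_ops_ptr();
	 *	if (ops->reset_func != NULL)
	 *		ops->reset_func();
	 */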
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif

#ifdef IMAGE_BL31 /* Core and cluster power down is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
	 */
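	/*
	 * Illustrative sketch only (the pwr_dwn_ops name is made up here; the
	 * real handler table layout comes from cpu_macros.S via the
	 * CPU_PWR_DWN_OPS offset): the level is clamped to the deepest
	 * available handler before dispatch, roughly as in:
	 *
	 *	unsigned int lvl = power_level;
	 *
	 *	if (lvl > (CPU_MAX_PWR_DWN_OPS - 1))
	 *		lvl = CPU_MAX_PWR_DWN_OPS - 1;
	 *	cpu_ops->pwr_dwn_ops[lvl]();
	 */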
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
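	/*
	 * Rough C equivalent, for illustration only (assuming a helper
	 * cpu_data() that returns this core's per-CPU data and a field
	 * corresponding to the CPU_DATA_CPU_OPS_PTR slot):
	 *
	 *	if (cpu_data()->cpu_ops_ptr == NULL)
	 *		cpu_data()->cpu_ops_ptr = get_cpu_ops_ptr();
	 */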
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementation and
	 * part number are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementation number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return :
	 * x0 - The matching cpu_ops pointer on success
	 * x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
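	/*
	 * The search below is, roughly, the following C (a sketch only; the
	 * cpu_ops array spans __CPU_OPS_START__ to __CPU_OPS_END__ and the
	 * field name midr is illustrative, not the real layout):
	 *
	 *	uint32_t midr = read_midr_el1() & CPU_IMPL_PN_MASK;
	 *	struct cpu_ops *ops;
	 *
	 *	for (ops = __CPU_OPS_START__; ops < __CPU_OPS_END__; ops++)
	 *		if ((ops->midr & CPU_IMPL_PN_MASK) == midr)
	 *			return ops;
	 *	return NULL;	(or retry with midr == 0 when
	 *			 SUPPORT_UNKNOWN_MPID is enabled)
	 */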
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches the midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr

/*
 * Extract CPU revision and variant, and combine them into a single number for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
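	/*
	 * Worked example (illustrative): the ubfx below copies MIDR[23:16]
	 * into x0[7:0], placing the variant in x0[7:4]; the bfxil then
	 * overwrites x0[3:0] with the revision. A core at variant 2,
	 * revision 1 (r2p1) therefore returns x0 = 0x21.
	 */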
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the function indicates that the erratum applies; otherwise it
 * does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, the function indicates that the erratum applies; otherwise it
 * does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the given
 * range (inclusive of both bounds), the function indicates that the erratum
 * applies; otherwise it does not.
 *
 * Shall clobber: x0-x4
 */
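/*
 * Illustrative use: for an erratum present from r0p1 to r1p0 inclusive, a
 * caller would pass x1 = 0x01 and x2 = 0x10; a core at rev-var 0x10 then
 * gets ERRATA_APPLIES, while one at 0x11 (r1p1) gets ERRATA_NOT_APPLIES.
 */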
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 * - with MMU and data caches enabled;
 * - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 * in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 * in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 * in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies