/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

	/* The reset handler is needed in any BL image that runs from the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif
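
/*
 * Illustrative sketch, not part of this file: the reset_handler slot invoked
 * above is populated by each CPU library file through the declare_cpu_ops
 * macro from cpu_macros.S. The CPU name, MIDR constant and function names
 * below are hypothetical:
 *
 *	func cpu_xyz_reset_func
 *		... apply reset-time errata workarounds, set control bits ...
 *		ret
 *	endfunc cpu_xyz_reset_func
 *
 *	declare_cpu_ops cpu_xyz, CPU_XYZ_MIDR, cpu_xyz_reset_func, \
 *		cpu_xyz_core_pwr_dwn, cpu_xyz_cluster_pwr_dwn
 */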

#ifdef IMAGE_BL31 /* The core and cluster power down functions are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * The CPU power down preparation function, common to all platforms.
	 * It takes the power domain level to be powered down as its
	 * parameter. After the cpu_ops pointer is retrieved from cpu_data,
	 * the handler for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last supported power level
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
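
/*
 * Worked example of the handler-table indexing above: the power down handlers
 * form an array of 8-byte pointers starting at offset CPU_PWR_DWN_OPS inside
 * cpu_ops, so for power level N the code loads from
 * (cpu_ops + CPU_PWR_DWN_OPS + N * 8). With CPU_MAX_PWR_DWN_OPS = 2 (the
 * usual definition), level 0 selects the core handler at CPU_PWR_DWN_OPS + 0
 * and level 1 - or any higher request, clamped by the csel - selects the
 * cluster handler at CPU_PWR_DWN_OPS + 8.
 */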


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in the cpu_ops entries. Only the implementer and part
	 * number are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementer number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return :
	 *	x0 - The matching cpu_ops pointer on success
	 *	x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return an error if we have already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr
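
/*
 * Worked example of the matching above (values illustrative): on a
 * Cortex-A57 r0p0, MIDR_EL1 reads 0x410FD070 (implementer 0x41, part number
 * 0xD07). Masking with CPU_IMPL_PN_MASK keeps only the implementer and part
 * number fields, so 0x410FD070 becomes 0x4100D070, which is compared against
 * the similarly-masked CPU_MIDR word of each linked-in cpu_ops entry between
 * __CPU_OPS_START__ and __CPU_OPS_END__.
 */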

/*
 * Extract the CPU revision and variant, and combine them into a single
 * numeric for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
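
/*
 * Worked example (MIDR value illustrative): for an r2p1 part, MIDR_EL1 holds
 * variant 2 in bits [23:20] and revision 1 in bits [3:0]. The ubfx above
 * copies x1[23:16] into x0[7:0] (0x2F here, since bits [19:16] of MIDR hold
 * the architecture field), and the bfxil then overwrites x0[3:0] with the
 * revision, so cpu_get_rev_var returns 0x21, i.e. (variant << 4) | revision.
 */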

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, this indicates that the erratum applies; otherwise it does
 * not. A usage sketch follows the function.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
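
/*
 * Hedged usage sketch (function name and cut-off hypothetical): per-CPU
 * errata checks in the CPU library files are typically built on this helper,
 * with the packed revision-variant from cpu_get_rev_var passed in x0:
 *
 *	func check_errata_xxxxxx
 *		mov	x1, #0x10	// erratum applies up to r1p0
 *		b	cpu_rev_var_ls
 *	endfunc check_errata_xxxxxx
 */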

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to
 * the given value, this indicates that the erratum applies; otherwise it does
 * not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs
260
laurenw-arm94accd32019-08-20 15:51:24 -0500261/*
262 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for errata
263 * application purposes. If the revision-variant is between or includes the given
264 * values, this indicates that errata applies; otherwise not.
265 *
266 * Shall clobber: x0-x4
267 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range
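
/*
 * Hedged usage sketch (function name and bounds hypothetical): checking
 * whether the revision-variant in x0 lies between r0p1 and r1p0 inclusive:
 *
 *	func check_errata_yyyyyy
 *		mov	x1, #0x01	// lower bound: r0p1
 *		mov	x2, #0x10	// upper bound: r1p0
 *		b	cpu_rev_var_range
 *	endfunc check_errata_yyyyyy
 */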

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print the errata status for CPUs of its class. Must be called
 * only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 * - ERRATA_APPLIES when firmware mitigation is required.
 * - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 * - ERRATA_MISSING when firmware mitigation would be required but
 *   is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
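
/*
 * Hedged usage sketch (C caller; the exact handler code may differ): the
 * SMCCC_ARCH_FEATURES query for SMCCC_ARCH_WORKAROUND_1 can be derived from
 * this result, along the lines of:
 *
 *	if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)
 *		return 1;	// workaround not required on this CPU
 *	return 0;		// ERRATA_APPLIES or ERRATA_MISSING
 */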

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 * - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *   the CVE.
 * - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *   mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies
449endfunc check_smccc_arch_wa3_applies