/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler
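	/*
	 * A minimal, hypothetical sketch of the platform hook called above: a
	 * platform with no CPU-specific reset quirks can reduce
	 * plat_reset_handler to a plain return, provided it stays within the
	 * documented x0 - x18, x30 clobber contract:
	 *
	 *	func plat_reset_handler
	 *		ret
	 *	endfunc plat_reset_handler
	 */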

#endif

#ifdef IMAGE_BL31 /* The core and cluster power down functions are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Common CPU power-down preparation for all platforms. The function
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level equals or exceeds CPU_MAX_PWR_DWN_OPS,
	 * call the power down handler for the highest implemented power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
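	/*
	 * Worked example of the handler indexing above (illustrative only,
	 * assuming CPU_MAX_PWR_DWN_OPS is 2): power_level 0 selects the
	 * handler at offset CPU_PWR_DWN_OPS + (0 << 3) in cpu_ops (the
	 * core-level handler), power_level 1 selects CPU_PWR_DWN_OPS +
	 * (1 << 3) (the cluster-level handler), and any higher level is
	 * clamped to the last implemented handler.
	 */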


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * This function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in the cpu_ops entries. Only the implementation and part number
	 * are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementer number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return :
	 *	x0 - The matching cpu_ops pointer on success
	 *	x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using the mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return an error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr
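/*
 * Worked example of the matching above (illustrative only): a Cortex-A53 r0p4
 * reads MIDR_EL1 as 0x410FD034; masking with CPU_IMPL_PN_MASK keeps only the
 * implementer (0x41) and part number (0xD03) fields, giving 0x4100D030, which
 * is then compared against the masked CPU_MIDR field of each cpu_ops entry
 * between __CPU_OPS_START__ and __CPU_OPS_END__.
 */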

/*
 * Extract the CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
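/*
 * Worked example of the packing above (illustrative only): on an r2p1 core
 * (variant 0x2, revision 0x1), cpu_get_rev_var returns 0x21 in x0, so
 * revision-variant values can be compared as plain integers.
 */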

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the check indicates that the erratum applies; otherwise it does
 * not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
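/*
 * Hypothetical usage sketch from a CPU errata check, assuming an erratum fixed
 * in r1p2 (so it applies up to and including r1p1); link-register handling is
 * omitted for brevity and the label name is illustrative:
 *
 *	bl	cpu_get_rev_var
 *	mov	x1, #0x11
 *	bl	cpu_rev_var_ls
 *	cmp	x0, #ERRATA_APPLIES
 *	b.ne	erratum_not_applicable
 */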

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is greater than or equal to
 * the given value, the check indicates that the erratum applies; otherwise it
 * does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the given
 * range (inclusive of both bounds), the check indicates that the erratum
 * applies; otherwise it does not.
 *
 * Shall clobber: x0-x4
 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range
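/*
 * Hypothetical usage sketch, assuming an erratum that affects only r1p0
 * through r2p0 inclusive; link-register handling is omitted for brevity and
 * the label name is illustrative:
 *
 *	bl	cpu_get_rev_var
 *	mov	x1, #0x10
 *	mov	x2, #0x20
 *	bl	cpu_rev_var_range
 *	cmp	x0, #ERRATA_APPLIES
 *	b.ne	erratum_not_applicable
 */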

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print the errata status for CPUs of its class. Must be called
 * only:
 *
 * - with the MMU and data caches enabled;
 * - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve the pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing the errata status requires atomically testing the printed
	 * flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to the errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is not set (CPU_NO_EXTRA1_FUNC),
	 * this CPU is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
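/*
 * Hypothetical caller sketch, e.g. an SMCCC_ARCH_FEATURES handler deciding
 * whether SMCCC_ARCH_WORKAROUND_1 should be advertised for this CPU (the
 * label name is illustrative):
 *
 *	bl	check_wa_cve_2017_5715
 *	cmp	x0, #ERRATA_APPLIES
 *	b.ne	wa1_not_required
 */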

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
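/*
 * Hypothetical caller sketch: fetch the per-CPU disable routine and invoke it
 * only when dynamic mitigation is in use (the label name is illustrative):
 *
 *	bl	wa_cve_2018_3639_get_disable_ptr
 *	cbz	x0, no_dynamic_mitigation
 *	blr	x0
 * no_dynamic_mitigation:
 */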

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is not set (CPU_NO_EXTRA3_FUNC),
	 * this CPU is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies