/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>

	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif

#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
	 */
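	/*
	 * Illustrative note (not part of the original comment): with the
	 * usual two-entry pwr_dwn_ops array, power_level 0 typically selects
	 * the core-level power down handler and power_level 1 the
	 * cluster-level one; any higher level is clamped to the last entry,
	 * as done below.
	 */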
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in cpu_ops entries. Only the implementation and part number
	 * are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementation number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return :
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
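	/*
	 * Note (assumption based on the wider TF-A tree, not this file): the
	 * cpu_ops entries searched here are emitted by the declare_cpu_ops
	 * macros in cpu_macros.S into a dedicated section which the linker
	 * script bounds with __CPU_OPS_START__ and __CPU_OPS_END__.
	 */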
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches to midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
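	/*
	 * Illustrative example (values not from the original source): a MIDR
	 * with variant 0x2 and revision 0x1 (r2p1) yields x0 = 0x21.
	 */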
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, this indicates that the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
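/*
 * Illustrative example (values not from the original source): a core reporting
 * r1p0 (x0 = 0x10) checked against a cut-off of r2p1 (x1 = 0x21) returns
 * ERRATA_APPLIES, since 0x10 <= 0x21.
 */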
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, this indicates that the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
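/*
 * Illustrative example (values not from the original source): r3p0 (x0 = 0x30)
 * checked against a threshold of r2p1 (x1 = 0x21) returns ERRATA_APPLIES,
 * since 0x30 >= 0x21.
 */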
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the given
 * range (bounds included), this indicates that the errata applies; otherwise
 * it does not.
 *
 * Shall clobber: x0-x4
 */
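/*
 * Illustrative example (values not from the original source): r1p1 (x0 = 0x11)
 * checked against the range r1p0..r2p1 (x1 = 0x10, x2 = 0x21) returns
 * ERRATA_APPLIES; r3p0 (x0 = 0x30) would not.
 */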
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
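/*
 * Note (assumption, not stated in this file): the result of this check is
 * typically consulted when answering SMCCC_ARCH_FEATURES queries for
 * SMCCC_ARCH_WORKAROUND_1.
 */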
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies