/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>

	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	/*
	 * Assert if an invalid cpu_ops pointer was obtained. If it is not
	 * valid, the proper CPU file has probably not been included in the
	 * build.
	 */
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif
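
/*
 * Note: reset_handler is reached straight from the reset vector, before any
 * stack has been set up, which is why the return address is preserved in x19
 * rather than on a stack. The early entrypoint code is assumed (not enforced
 * here) to simply branch-and-link to it under the same build conditions as
 * the #if guard above.
 */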

#ifdef IMAGE_BL31 /* CPU and cluster power down handling is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepares the CPU for power down; common to all platforms. The
	 * function takes the power domain level to be powered down as its
	 * parameter. After the cpu_ops pointer is retrieved from cpu_data,
	 * the handler for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level is deeper than the deepest one implemented
	 * (CPU_MAX_PWR_DWN_OPS - 1), clamp it and call the power down handler
	 * for the deepest level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/*
	 * Get the appropriate power down handler: cpu_ops holds an array of
	 * CPU_MAX_PWR_DWN_OPS handler pointers (one per power level) starting
	 * at offset CPU_PWR_DWN_OPS, so index it with the clamped level.
	 */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
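
	/*
	 * Illustrative call site (a sketch, not the exact PSCI code), using
	 * the C prototype documented above. "max_off_lvl" is a hypothetical
	 * variable holding the deepest power domain level being turned off:
	 *
	 *	prepare_cpu_pwr_dwn(max_off_lvl);
	 *	// ... followed by the platform's wfi-based power down ...
	 */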


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */
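
/*
 * Illustrative use of init_cpu_ops() from C (a sketch; the exact call sites
 * live in the BL31/PSCI setup code). Repeated calls are harmless: the lookup
 * is only performed the first time around on each CPU.
 *
 *	init_cpu_ops();		// e.g. once per CPU during its setup path
 */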

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a
	 * matching cpu_ops structure entry is found, the corresponding
	 * cpu_reg_dump in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementation and
	 * part number are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it falls back to looking for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementation number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return :
	 *	x0 - The matching cpu_ops pointer on success
	 *	x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
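	/*
	 * (Background, stated as an assumption about the surrounding build
	 * machinery: each CPU library file registers its cpu_ops structure,
	 * typically via the declare_cpu_ops macro, and the linker script
	 * gathers them into the region delimited by __CPU_OPS_START__ and
	 * __CPU_OPS_END__. The loop below simply walks that array.)
	 */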
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr_l	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr_l	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches this core's midr */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr

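/*
 * unsigned int cpu_get_rev_var(void);	(assumed C prototype)
 *
 * Returns, in x0, the revision and variant fields of this core's MIDR_EL1,
 * packed by the get_rev_var macro into the format consumed by the per-CPU
 * errata check routines.
 * Clobbers: x0, x1
 */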
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	get_rev_var x0, x1
	ret
endfunc cpu_get_rev_var

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
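
/*
 * Illustrative use (a sketch, not the exact SMCCC dispatcher code): the
 * result is expected to drive whether SMCCC_ARCH_WORKAROUND_1 is advertised
 * as needed, along the lines of:
 *
 *	if (check_wa_cve_2017_5715() == ERRATA_APPLIES) {
 *		// advertise and wire up SMCCC_ARCH_WORKAROUND_1
 *	}
 */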

/*
 * int check_wa_cve_2024_7881(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2024_7881
func check_wa_cve_2024_7881
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA4_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2024-7881 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA4_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2024_7881

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
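
/*
 * Illustrative use (a sketch, not the exact TF-A call site): a NULL check on
 * the returned pointer is enough to tell whether this core relies on the
 * dynamic mitigation, and the pointer itself is what a caller would invoke
 * to turn that mitigation off:
 *
 *	if (wa_cve_2018_3639_get_disable_ptr() != NULL) {
 *		// dynamic mitigation in use on this core
 *	}
 */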

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies