/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>
#ifdef IMAGE_BL31 /* Core and cluster power down functions are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare the CPU power down function for all platforms. The function
	 * takes the domain level to be powered down as its parameter. After
	 * the cpu_ops pointer is retrieved from cpu_data, the handler for the
	 * requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/*
	 * Get the appropriate power down handler: the handlers are stored as
	 * an array of 64-bit pointers at offset CPU_PWR_DWN_OPS, so index it
	 * by the clamped power level scaled by 8 (lsl #3).
	 */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
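
	/*
	 * Illustrative C-level sketch (the exact call site is a PSCI
	 * implementation detail and may differ):
	 *
	 *     // power_level is the deepest local power domain level being
	 *     // powered down; values beyond CPU_MAX_PWR_DWN_OPS - 1 are
	 *     // clamped to the last handler, as done above.
	 *     prepare_cpu_pwr_dwn(power_level);
	 */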


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	/* Save the return address in x10: no runtime stack is available */
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The function below returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementer and
	 * part number fields are used for the match.
	 *
	 * If no cpu_ops entry for the MIDR_EL1 value can be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will look for a default
	 * cpu_ops entry with an MIDR value of 0.
	 * (Implementer number 0x0 should be reserved for software use,
	 * so no clashes should happen with that default value.)
	 *
	 * Return :
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr_l	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr_l	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches this core's midr */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return an error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
#if ENABLE_ASSERTIONS
	/*
	 * Assert if an invalid cpu_ops pointer was obtained. If it is not
	 * valid, this may suggest that the proper CPU file hasn't been
	 * included in the build.
	 */
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ret
endfunc get_cpu_ops_ptr
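
	/*
	 * Implementation note on the search above: x4/x5 are pre-biased by
	 * CPU_MIDR so that each `ldr x1, [x4], #CPU_OPS_SIZE` reads an
	 * entry's midr field and steps to the next entry in a single
	 * post-indexed load. The entries themselves are emitted into the
	 * __CPU_OPS_START__/__CPU_OPS_END__ linker region by the
	 * declare_cpu_ops macro family (see cpu_macros.S), one per CPU
	 * library file linked into the image.
	 */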

	.globl	cpu_get_rev_var
func cpu_get_rev_var
	get_rev_var x0, x1
	ret
endfunc cpu_get_rev_var

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
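
	/*
	 * Illustrative sketch (hypothetical caller; the real SMCCC plumbing
	 * differs per port): an SMCCC_ARCH_FEATURES handler can key the
	 * availability of SMCCC_ARCH_WORKAROUND_1 off this result:
	 *
	 *     if (check_wa_cve_2017_5715() == ERRATA_APPLIES) {
	 *             // report SMCCC_ARCH_WORKAROUND_1 as required
	 *     }
	 *
	 * check_wa_cve_2024_7881 below follows the same pattern through the
	 * CPU_EXTRA4_FUNC slot.
	 */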

/*
 * int check_wa_cve_2024_7881(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2024_7881
func check_wa_cve_2024_7881
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA4_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2024-7881 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA4_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2024_7881

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
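
	/*
	 * Minimal usage sketch, assuming a void (*)(void) shape for the
	 * disable routine (hypothetical; the real prototype lives with the
	 * per-CPU implementations):
	 *
	 *     void (*disable)(void) = wa_cve_2018_3639_get_disable_ptr();
	 *
	 *     if (disable != NULL)
	 *             disable();      // turn the dynamic mitigation off
	 */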

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies
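
	/*
	 * Illustrative sketch (hypothetical caller): feature discovery for
	 * SMCCC_ARCH_WORKAROUND_3 can be keyed off this result, e.g.:
	 *
	 *     if (check_smccc_arch_wa3_applies() == ERRATA_APPLIES) {
	 *             // advertise SMCCC_ARCH_WORKAROUND_3 support
	 *     }
	 */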