/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>

	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	/*
	 * Assert if invalid cpu_ops obtained. If this is not valid, it may
	 * suggest that the proper CPU file hasn't been included.
	 */
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif

#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
	 */
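	/*
	 * Roughly equivalent C, as a sketch only (the cpu_ops layout comes
	 * from cpu_macros.S / cpu_ops.h; the struct and field names below are
	 * illustrative, not the real definitions):
	 *
	 *   void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *   {
	 *       struct cpu_ops *ops = get_cpu_data(cpu_ops_ptr);
	 *       unsigned int lvl = MIN(power_level, CPU_MAX_PWR_DWN_OPS - 1U);
	 *
	 *       ops->pwr_dwn_ops[lvl]();
	 *   }
	 *
	 * The handler is tail-called via "br x1" below rather than returned to.
	 */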
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
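	/*
	 * In C terms, a minimal sketch of the logic below (cpu_ops_ptr lives
	 * in the per-CPU data addressed through TPIDR_EL3):
	 *
	 *   if (get_cpu_data(cpu_ops_ptr) == 0)
	 *       set_cpu_data(cpu_ops_ptr, get_cpu_ops_ptr());
	 */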
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in the cpu_ops entries. Only the implementation and part
	 * number are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementation number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return :
	 *     x0 - The matching cpu_ops pointer on Success
	 *     x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
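	/*
	 * A C sketch of the search below (pseudocode only; the cpu_ops
	 * entries live in the linker section bounded by __CPU_OPS_START__
	 * and __CPU_OPS_END__):
	 *
	 *   for (entry = __CPU_OPS_START__; entry < __CPU_OPS_END__;
	 *        entry += CPU_OPS_SIZE)
	 *       if ((entry->midr & CPU_IMPL_PN_MASK) ==
	 *           (midr_el1 & CPU_IMPL_PN_MASK))
	 *           return entry;
	 *   return 0;   (retried once with MIDR == 0 if SUPPORT_UNKNOWN_MPID)
	 */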
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches to midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
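/*
 * For example, a core at r2p1 (MIDR variant 0x2, revision 0x1) returns
 * x0 = 0x21.
 */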
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is lower than or the same as
 * the given value, the function indicates that the errata applies; otherwise
 * it does not.
 *
 * Shall clobber: x0-x3
 */
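/*
 * For example, cpu_rev_var_ls(0x00, 0x21) (r0p0 against r2p1) returns
 * ERRATA_APPLIES, while cpu_rev_var_ls(0x30, 0x21) (r3p0 against r2p1)
 * returns ERRATA_NOT_APPLIES.
 */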
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or the same as
 * the given value, the function indicates that the errata applies; otherwise
 * it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the range,
 * bounds included, the function indicates that the errata applies; otherwise
 * it does not.
 *
 * Shall clobber: x0-x4
 */
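/*
 * For example, with x0 = 0x11 (r1p1), x1 = 0x10 (r1p0) and x2 = 0x21 (r2p1),
 * r1p1 lies within [r1p0, r2p1] and the function returns ERRATA_APPLIES.
 */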
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
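/*
 * Typical use from C, as a sketch only (for example when deciding whether
 * SMCCC_ARCH_WORKAROUND_1 needs to be advertised and invoked):
 *
 *   if (check_wa_cve_2017_5715() == ERRATA_APPLIES)
 *       ... the firmware workaround is required on this core ...
 */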
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies