/*
 * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler
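
/*
 * Illustrative sketch (not part of the original flow): the reset_handler
 * above relies on each CPU library registering a cpu_ops entry, typically
 * via the declare_cpu_ops macro from cpu_macros.S, along the lines of:
 *
 *	declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
 *		cortex_a53_reset_func, \
 *		cortex_a53_core_pwr_dwn, \
 *		cortex_a53_cluster_pwr_dwn
 *
 * which places the entry in the __CPU_OPS_START__/__CPU_OPS_END__ region
 * searched by get_cpu_ops_ptr below.
 */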

#endif

#ifdef IMAGE_BL31 /* Core and cluster power down is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function
	 * takes a domain level to be powered down as its parameter. After
	 * the cpu_ops pointer is retrieved from cpu_data, the handler for
	 * the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
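
/*
 * Worked example (a sketch, assuming CPU_MAX_PWR_DWN_OPS == 2): a call
 * with power_level == 0 loads the handler at
 * cpu_ops + CPU_PWR_DWN_OPS + (0 << 3), i.e. the core power down
 * routine; power_level == 1, or any higher value after the clamp above,
 * loads cpu_ops + CPU_PWR_DWN_OPS + (1 << 3), the cluster routine.
 * Each handler slot is a 64-bit pointer, hence the lsl #3.
 */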


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
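
/*
 * Rough C-level view of the path above (a sketch only; the real
 * per-CPU accessor names live in cpu_data.h):
 *
 *	if (cpu_data()->cpu_ops_ptr == NULL)
 *		cpu_data()->cpu_ops_ptr = get_cpu_ops_ptr();
 *
 * tpidr_el3 holds the per-CPU cpu_data pointer, which is why no
 * runtime stack is required here.
 */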
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a
	 * matching cpu_ops structure entry is found, the corresponding
	 * cpu_reg_dump in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in cpu_ops entries. Only the implementation and part number
	 * are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementation number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return:
	 *	x0 - The matching cpu_ops pointer on success
	 *	x0 - 0 on failure.
	 * Clobbers: x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches the midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr
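
/*
 * Worked example (hypothetical values): with MIDR_EL1 == 0x410fd034
 * (an r0p4 part from implementer 0x41, part number 0xd03), masking with
 * CPU_IMPL_PN_MASK keeps only the implementer and part number fields,
 * giving 0x4100d030 to compare against each entry's CPU_MIDR word; the
 * variant and revision nibbles play no part in the match.
 */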

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
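
/*
 * Worked example: for an r2p1 part (MIDR variant == 0x2, revision ==
 * 0x1), the ubfx/bfxil pair above yields x0 == 0x21, so packed
 * revision-variant values compare naturally as integers:
 * r0p4 (0x04) < r1p0 (0x10) < r2p1 (0x21).
 */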

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, this indicates that the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
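
/*
 * Usage sketch (hypothetical erratum number): a CPU errata check would
 * typically tail-call this helper with x0 already holding the value
 * returned by cpu_get_rev_var:
 *
 *	func check_errata_999999
 *		mov	x1, #0x10	// erratum applies up to r1p0
 *		b	cpu_rev_var_ls
 *	endfunc check_errata_999999
 */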

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, this indicates that the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the given
 * range (inclusive of both bounds), this indicates that the errata applies;
 * otherwise it does not.
 *
 * Shall clobber: x0-x4
 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range
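
/*
 * Worked example: with x0 == 0x12 (r1p2), x1 == 0x10 (r1p0) and
 * x2 == 0x21 (r2p1), the first csel sees 0x12 >= 0x10 and the second
 * sees 0x12 <= 0x21, so ERRATA_APPLIES is returned; an r3p0 part
 * (0x30) would fail the upper-bound check and return
 * ERRATA_NOT_APPLIES.
 */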

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *    - with MMU and data caches enabled;
 *    - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #0
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
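
/*
 * Caller sketch (an assumption, in C pseudocode): a runtime service
 * probing for SMCCC_ARCH_WORKAROUND_1 support could use this helper
 * roughly as
 *
 *	if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)
 *		return 1;	// workaround not required
 *
 * The exact return convention is owned by the caller, not this helper.
 */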

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
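
/*
 * Caller sketch (an assumption, in C pseudocode): on cores with
 * dynamic mitigation, the routine returned here can be invoked to
 * disable the mitigation, roughly as
 *
 *	void (*disable)(void) =
 *		(void (*)(void))wa_cve_2018_3639_get_disable_ptr();
 *	if (disable != NULL)
 *		disable();
 *
 * Static-mitigation or unaffected cores return NULL, so the check
 * falls through, matching the contract described above.
 */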