blob: da663be0e0a4fd2922fa6a814eab2c32c5ebc3d3 [file] [log] [blame]
/*
 * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
7#include <arch.h>
8#include <asm_macros.S>
9#include <assert_macros.S>
Varun Wadekar4d034c52019-01-11 14:47:48 -080010#include <common/bl_common.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000011#include <common/debug.h>
David Cunado1f5f8122017-01-17 14:40:15 +000012#include <cpu_macros.S>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000013#include <lib/cpus/errata_report.h>
14#include <lib/el3_runtime/cpu_data.h>
Soby Mathewc704cbc2014-08-14 11:33:56 +010015
	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. Runs very early: no stack is available,
	 * so the return address is preserved in a callee-chosen register.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30	/* No stack yet: stash return address in x19 */

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (returned in x0) */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19	/* Restore link register before tail-calling */
	cbz	x2, 1f		/* No CPU-specific reset handler registered */

	/*
	 * The cpu_ops reset handler can clobber x0 - x19, x30.
	 * Tail-call: with x30 already restored, the handler's ret
	 * returns directly to our caller.
	 */
	br	x2
1:
	ret
endfunc reset_handler

#endif
Soby Mathewc704cbc2014-08-14 11:33:56 +010050
#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for requested power
	 * level is called. The handler is tail-called, so it returns directly
	 * to this function's caller.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level:
	 * x2 = min(power_level, CPU_MAX_PWR_DWN_OPS - 1)
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	/* Fetch this core's cpu_ops pointer from per-CPU data */
	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/*
	 * Get the appropriate power down handler: the handlers form an
	 * array of 8-byte function pointers starting at CPU_PWR_DWN_OPS,
	 * hence the "lsl #3" scaling of the level index.
	 */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1		/* Tail-call the selected handler */
endfunc prepare_cpu_pwr_dwn
Soby Mathew8e2f2872014-08-14 12:49:05 +010087
88
	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled. The return address is
	 * preserved in x10 across the get_cpu_ops_ptr call.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f		/* Already initialized: nothing to do */
	mov	x10, x30	/* No stack: save link register in x10 */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	/*
	 * Cache the pointer in per-CPU data. The pre-index writeback ("!")
	 * also advances x6, which is harmless as x6 is a documented clobber.
	 */
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */
112
#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked. Crash path: no stack is used; the
	 * return address is preserved in x16.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30	/* Save link register (no stack on crash path) */

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f		/* No cpu_ops found: nothing to dump */

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f		/* No dump handler registered for this CPU */
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif
137
	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in cpu_ops entries (the linker-collected region between
	 * __CPU_OPS_START__ and __CPU_OPS_END__). Only the implementation
	 * and part number are used to match the entries.
	 * Return :
	 *	x0 - The matching cpu_ops pointer on Success
	 *	x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/*
	 * Get the cpu_ops start and end locations, pre-offset by CPU_MIDR
	 * so x4 points straight at each entry's midr field.
	 */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3
1:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	error_exit

	/* Load the midr from the cpu_ops, post-incrementing to next entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches to midr of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	ret
endfunc get_cpu_ops_ptr
Soby Mathewc0884332014-09-22 12:11:36 +0100185
/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 *
 * Out: x0 = variant[7:4] | revision[3:0], taken from MIDR_EL1.
 * Clobbers: x0, x1
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Pull variant[23:20] out of MIDR into x0 (upper bits zeroed),
	 * move it up to x0[7:4], then insert revision[3:0] into x0[3:0]
	 * leaving the other bits intact.
	 */
	ubfx	x0, x1, #MIDR_VAR_SHIFT, #MIDR_VAR_BITS
	lsl	x0, x0, #MIDR_REV_BITS
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
205
/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or same as a given
 * value, indicates that errata applies; otherwise not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	x0, x1
	mov	x2, #ERRATA_NOT_APPLIES
	mov	x3, #ERRATA_APPLIES
	/* x0 = (rev_var <= x1) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES */
	csel	x0, x3, x2, ls
	ret
endfunc cpu_rev_var_ls
221
/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or same as a
 * given value, indicates that errata applies; otherwise not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	x0, x1
	mov	x2, #ERRATA_NOT_APPLIES
	mov	x3, #ERRATA_APPLIES
	/* x0 = (rev_var >= x1) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES */
	csel	x0, x3, x2, hs
	ret
endfunc cpu_rev_var_hs
237
/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for errata
 * application purposes. If the revision-variant is between or includes the given
 * values, this indicates that errata applies; otherwise not.
 *
 * Shall clobber: x0-x4
 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	/*
	 * "ls" after the ccmp holds only when x0 >= x1 AND x0 <= x2: if the
	 * first compare fails (lo), ccmp forces NZCV to #0x2 (C set, Z clear)
	 * so the final "ls" test is false; otherwise flags come from
	 * comparing x0 against the upper bound x2.
	 */
	cmp	x0, x1
	ccmp	x0, x2, #0x2, hs
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	csel	x0, x3, x4, ls
	ret
endfunc cpu_rev_var_range
258
#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with MMU and data caches are enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly. A stack slot pair keeps sp 16-byte aligned while
	 * preserving x30 across the call (xzr pads the pair).
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 * Preserve the printing function in callee-saved x19 across the call.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19		/* x1 = errata printing function for .Lprint */
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU (tail-call) */
	br	x1
endfunc print_errata_status
#endif
Dimitris Papastamos914757c2018-03-12 14:47:09 +0000324
/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 * in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #0
	beq	1f
	br	x0		/* Tail-call the CPU-specific check */
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
Dimitris Papastamosba51d9e2018-05-16 11:36:14 +0100361
/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 * in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	/* Return the CPU_EXTRA2_FUNC slot verbatim (NULL if not provided) */
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr