/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

	/* The reset handler is needed in any BL image that runs from the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler
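
/*
 * Illustrative C equivalent of reset_handler (a sketch, not part of the
 * build; the real routine runs stackless, which is why it parks the return
 * address in x19, and the cpu_ops_t field name below mirrors the
 * CPU_RESET_FUNC offset from cpu_macros.S):
 *
 *	void reset_handler(void)
 *	{
 *		cpu_ops_t *ops;
 *
 *		plat_reset_handler();
 *		ops = get_cpu_ops_ptr();
 *		assert(ops != NULL);
 *		if (ops->reset_func != NULL)
 *			ops->reset_func();	// tail-called via br x2
 *	}
 */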

#endif

#ifdef IMAGE_BL31 /* CPU/cluster power down support is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Common CPU power down preparation for all platforms. The function
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
	br	x1
endfunc prepare_cpu_pwr_dwn
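
/*
 * Illustrative C equivalent of prepare_cpu_pwr_dwn (a sketch, not part of
 * the build; the handler is loaded from offset
 * CPU_PWR_DWN_OPS + power_level * 8 of the cpu_ops entry, and the array
 * field name below is illustrative):
 *
 *	void prepare_cpu_pwr_dwn(unsigned int power_level)
 *	{
 *		cpu_ops_t *ops = (cpu_ops_t *)get_cpu_data(cpu_ops_ptr);
 *
 *		// Clamp to the deepest implemented power down handler
 *		if (power_level > (CPU_MAX_PWR_DWN_OPS - 1U))
 *			power_level = CPU_MAX_PWR_DWN_OPS - 1U;
 *		ops->pwr_dwn_ops[power_level]();	// tail-called via br x1
 *	}
 */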

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
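
/*
 * Illustrative C equivalent of init_cpu_ops (a sketch, not part of the
 * build; get_cpu_data/set_cpu_data are the per-CPU data accessors from
 * cpu_data.h):
 *
 *	void init_cpu_ops(void)
 *	{
 *		if (get_cpu_data(cpu_ops_ptr) == 0U) {
 *			uintptr_t ops = (uintptr_t)get_cpu_ops_ptr();
 *
 *			assert(ops != 0U);
 *			set_cpu_data(cpu_ops_ptr, ops);
 *		}
 *	}
 */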
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The CPU-specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
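
/*
 * Illustrative C equivalent of do_cpu_reg_dump (a sketch, not part of the
 * build; the field name mirrors the CPU_REG_DUMP offset):
 *
 *	void do_cpu_reg_dump(void)
 *	{
 *		cpu_ops_t *ops = get_cpu_ops_ptr();
 *
 *		if ((ops != NULL) && (ops->cpu_reg_dump != NULL))
 *			ops->cpu_reg_dump();
 *	}
 */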
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementer and
	 * part number fields are used to match the entries.
	 * Return :
	 *     x0 - The matching cpu_ops pointer on Success
	 *     x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	w2, w2, w3
1:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	error_exit

	/* Load the MIDR from the cpu_ops entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the MIDR matches the MIDR of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr
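
/*
 * Illustrative C equivalent of the search above (a sketch, not part of the
 * build; __CPU_OPS_START__/__CPU_OPS_END__ are the linker-defined bounds of
 * the cpu_ops descriptor array, and the midr field/accessor names are
 * illustrative):
 *
 *	cpu_ops_t *get_cpu_ops_ptr(void)
 *	{
 *		uint32_t midr = read_midr_el1() & CPU_IMPL_PN_MASK;
 *		cpu_ops_t *ops;
 *
 *		for (ops = __CPU_OPS_START__; ops < __CPU_OPS_END__; ops++) {
 *			if ((ops->midr & CPU_IMPL_PN_MASK) == midr)
 *				return ops;
 *		}
 *		return NULL;	// no matching descriptor found
 *	}
 */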

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
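
/*
 * Worked example: a CPU reporting MIDR_EL1 = 0x410FD034 (Cortex-A53 r0p4:
 * variant[23:20] = 0x0, revision[3:0] = 0x4) returns 0x04 in x0, while an
 * r2p1 part (variant 0x2, revision 0x1) would return 0x21.
 */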

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
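
/*
 * Illustrative usage (a sketch; the erratum number is hypothetical): a CPU
 * file's check function for an erratum affecting revisions up to and
 * including r0p1 amounts to:
 *
 *	int check_errata_xxxxxx(uint64_t cpu_rev_var)
 *	{
 *		// 0x01 packs variant 0, revision 1, i.e. r0p1
 *		return cpu_rev_var_ls(cpu_rev_var, 0x01);
 *	}
 */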

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs
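
/*
 * Conversely (illustrative), an erratum present from r1p0 onwards would be
 * checked with cpu_rev_var_hs(cpu_rev_var, 0x10), where 0x10 packs
 * variant 1, revision 0.
 */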

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve the pointer to cpu_ops from per-CPU data and, from that,
	 * the errata printing function. If it's non-NULL, jump to it.
	 */
	mrs	x0, tpidr_el3
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to the errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
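
/*
 * Illustrative C equivalent of the non-BL1 path above (a sketch, not part
 * of the build; errata_needs_reporting is declared in errata_report.h and
 * the cpu_ops field names mirror the CPU_ERRATA_* offsets):
 *
 *	void print_errata_status(void)
 *	{
 *		cpu_ops_t *ops = (cpu_ops_t *)get_cpu_data(cpu_ops_ptr);
 *
 *		if ((ops->errata_func != NULL) &&
 *		    errata_needs_reporting(ops->errata_lock,
 *					   ops->errata_printed))
 *			ops->errata_func();
 *	}
 */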
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715, so bail out.
	 */
	cmp	x0, #0
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
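
/*
 * Illustrative C equivalent of check_wa_cve_2017_5715 (a sketch, not part
 * of the build; the field name mirrors the CPU_EXTRA1_FUNC offset):
 *
 *	int check_wa_cve_2017_5715(void)
 *	{
 *		cpu_ops_t *ops = (cpu_ops_t *)get_cpu_data(cpu_ops_ptr);
 *
 *		if (ops->extra1_func == NULL)	// CPU unaffected
 *			return ERRATA_NOT_APPLIES;
 *		return ops->extra1_func();	// tail-called via br x0
 *	}
 */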

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable the mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
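
/*
 * Illustrative C equivalent of wa_cve_2018_3639_get_disable_ptr (a sketch,
 * not part of the build; the field name mirrors the CPU_EXTRA2_FUNC offset,
 * which CPU files leave as NULL unless they implement dynamic mitigation):
 *
 *	void *wa_cve_2018_3639_get_disable_ptr(void)
 *	{
 *		cpu_ops_t *ops = (cpu_ops_t *)get_cpu_data(cpu_ops_ptr);
 *
 *		return ops->extra2_func;
 *	}
 */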