/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_data.h>
#include <cpu_macros.S>
#include <debug.h>
#include <errata_report.h>

	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif
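
/*
 * For reference, the cpu_ops entries searched by get_cpu_ops_ptr below are
 * registered by the individual CPU drivers through the declare_cpu_ops macro
 * from cpu_macros.S. A minimal sketch, modelled on the Cortex-A53 driver
 * (the names belong to that driver, not to this file):
 *
 * declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
 *	cortex_a53_reset_func, \
 *	cortex_a53_core_pwr_dwn, \
 *	cortex_a53_cluster_pwr_dwn
 *
 * Each such declaration places a cpu_ops structure in the linker section
 * bounded by __CPU_OPS_START__ and __CPU_OPS_END__.
 */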

#ifdef IMAGE_BL31 /* The core and cluster power down functions are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare the CPU power down function for all platforms. The function
	 * takes a domain level to be powered down as its parameter. After the
	 * cpu_ops pointer is retrieved from cpu_data, the handler for the
	 * requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the highest implemented level
	 * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for the
	 * last power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
	br	x1
endfunc prepare_cpu_pwr_dwn
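
/*
 * Illustration of the indexing above (descriptive only): each entry in the
 * power down handler array inside cpu_ops is a 64-bit pointer, so the handler
 * for the clamped power level n is loaded from offset
 * CPU_PWR_DWN_OPS + (n << 3). Level 0 typically selects the core power down
 * handler and level 1, where implemented, the cluster power down handler.
 */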

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
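
/*
 * Usage note (the call site is outside this file and described here only as
 * a sketch): BL31 is expected to call init_cpu_ops() early in its boot path,
 * once the MMU and the per-CPU data pointer in tpidr_el3 are set up, so that
 * later look-ups such as prepare_cpu_pwr_dwn can read CPU_DATA_CPU_OPS_PTR
 * without searching the cpu_ops list again.
 */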
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in the list of registered cpu_ops entries. Only the
	 * implementer and part number are used to match the entries.
	 * Return:
	 *	x0 - The matching cpu_ops pointer on success
	 *	x0 - 0 on failure.
	 * Clobbers: x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	w2, w2, w3
1:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	error_exit

	/* Load the MIDR from the cpu_ops entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the MIDR matches the MIDR of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr
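
/*
 * Worked example of the matching above, using an assumed Cortex-A53 r0p4
 * MIDR value (illustrative only): MIDR_EL1 = 0x410FD034 encodes implementer
 * 0x41 (Arm), variant 0x0, architecture 0xF, part number 0xD03 and revision
 * 0x4. Masking with CPU_IMPL_PN_MASK keeps only the implementer and part
 * number fields, giving 0x4100D030, which is compared against the similarly
 * masked CPU_MIDR field of each registered cpu_ops entry.
 */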

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
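
/*
 * Worked example (same assumed MIDR value as above, 0x410FD034, i.e. r0p4):
 * the ubfx copies MIDR bits [23:16] (0x0F) into x0[7:0], then the bfxil
 * replaces x0[3:0] with the revision field (0x4), leaving x0 = 0x04. An r2p1
 * part would yield 0x21. This packed value is what cpu_rev_var_ls and
 * cpu_rev_var_hs below compare against.
 */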

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, ERRATA_APPLIES is returned in x0; otherwise ERRATA_NOT_APPLIES.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, ERRATA_APPLIES is returned in x0; otherwise ERRATA_NOT_APPLIES.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs
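
/*
 * A typical caller, for illustration only (the erratum number and revision
 * are placeholders, not defined in this file). A CPU driver's check function
 * receives the cpu_get_rev_var result in x0 and tail-calls one of the helpers
 * above:
 *
 * func check_errata_xxxx
 *	mov	x1, #0x04		// erratum applies up to r0p4 (example)
 *	b	cpu_rev_var_ls
 * endfunc check_errata_xxxx
 */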
224
Jeenu Viswambharand5ec3672017-01-03 11:01:51 +0000225#if REPORT_ERRATA
226/*
227 * void print_errata_status(void);
228 *
229 * Function to print errata status for CPUs of its class. Must be called only:
230 *
231 * - with MMU and data caches are enabled;
232 * - after cpu_ops have been initialized in per-CPU data.
233 */
234 .globl print_errata_status
235func print_errata_status
236#ifdef IMAGE_BL1
237 /*
238 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
239 * directly.
240 */
241 stp xzr, x30, [sp, #-16]!
242 bl get_cpu_ops_ptr
243 ldp xzr, x30, [sp], #16
244 ldr x1, [x0, #CPU_ERRATA_FUNC]
245 cbnz x1, .Lprint
246#else
247 /*
248 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
249 * errata printing function. If it's non-NULL, jump to the function in
250 * turn.
251 */
252 mrs x0, tpidr_el3
253 ldr x1, [x0, #CPU_DATA_CPU_OPS_PTR]
254 ldr x0, [x1, #CPU_ERRATA_FUNC]
255 cbz x0, .Lnoprint
256
257 /*
258 * Printing errata status requires atomically testing the printed flag.
259 */
dp-arm815faa82017-05-05 12:21:03 +0100260 stp x19, x30, [sp, #-16]!
261 mov x19, x0
Soby Mathewc0884332014-09-22 12:11:36 +0100262
Jeenu Viswambharand5ec3672017-01-03 11:01:51 +0000263 /*
264 * Load pointers to errata lock and printed flag. Call
265 * errata_needs_reporting to check whether this CPU needs to report
266 * errata status pertaining to its class.
267 */
268 ldr x0, [x1, #CPU_ERRATA_LOCK]
269 ldr x1, [x1, #CPU_ERRATA_PRINTED]
270 bl errata_needs_reporting
dp-arm815faa82017-05-05 12:21:03 +0100271 mov x1, x19
272 ldp x19, x30, [sp], #16
Jeenu Viswambharand5ec3672017-01-03 11:01:51 +0000273 cbnz x0, .Lprint
274#endif
275.Lnoprint:
276 ret
277.Lprint:
278 /* Jump to errata reporting function for this CPU */
279 br x1
280endfunc print_errata_status
281#endif
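
/*
 * Usage note: the CPU_ERRATA_LOCK and CPU_ERRATA_PRINTED fields consulted via
 * errata_needs_reporting ensure that a CPU class reports its errata status
 * only once, even when several cores of that class execute this function.
 */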

/*
 * int check_workaround_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_workaround_cve_2017_5715
func check_workaround_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715, so bail out.
	 */
	cmp	x0, #0
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_workaround_cve_2017_5715
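
/*
 * For reference, the CPU_EXTRA1_FUNC slot consulted above is only populated
 * by drivers for affected CPUs, via the declare_cpu_ops_wa form of the
 * declaration macro, which takes the CVE-2017-5715 check function as an
 * additional argument. A sketch with illustrative names (see cpu_macros.S
 * for the exact argument list):
 *
 * declare_cpu_ops_wa cortex_a73, CORTEX_A73_MIDR, \
 *	cortex_a73_reset_func, \
 *	check_errata_cve_2017_5715, \
 *	cortex_a73_core_pwr_dwn, \
 *	cortex_a73_cluster_pwr_dwn
 */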