/*
 * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
#include <cpu_data.h>
#endif
#include <cpu_macros.S>
#include <debug.h>
#include <errata_report.h>

	/* The reset handler is needed by the BL images that execute at the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler
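
/*
 * A rough C rendering of the dispatch above, for reference only. This is
 * a sketch, not part of the build: cpu_ops is defined purely through
 * assembly offsets in cpu_macros.S, so the struct and field names below
 * are hypothetical.
 *
 *	void reset_handler(void)
 *	{
 *		struct cpu_ops *ops;
 *
 *		plat_reset_handler();
 *		ops = get_cpu_ops_ptr();	// asserted non-NULL above
 *		if (ops->reset_func != NULL)
 *			ops->reset_func();
 *	}
 */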

#endif

#ifdef IMAGE_BL31 /* The core and cluster power down functions are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare the CPU for power down, for all platforms. The function
	 * takes as its parameter the power domain level to be powered down.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is invoked.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS - 1, fall
	 * back to the power down handler for the highest implemented level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
	br	x1
endfunc prepare_cpu_pwr_dwn
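
/*
 * The clamping and table lookup above, as a C sketch (illustrative only;
 * this_cpu_data and pwr_dwn_ops are hypothetical names for the
 * tpidr_el3-based per-CPU data access and for the handler table stored at
 * offset CPU_PWR_DWN_OPS, whose entries are 8 bytes apart, hence the
 * lsl #3 above):
 *
 *	void prepare_cpu_pwr_dwn(unsigned int power_level)
 *	{
 *		struct cpu_ops *ops = this_cpu_data()->cpu_ops_ptr;
 *
 *		if (power_level > CPU_MAX_PWR_DWN_OPS - 1)
 *			power_level = CPU_MAX_PWR_DWN_OPS - 1;
 *		// br x1 is a tail call: the handler returns to our caller
 *		ops->pwr_dwn_ops[power_level]();
 *	}
 */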


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
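
/*
 * Equivalent lazy-initialization logic as a C sketch (illustrative only;
 * _cpu_data is assumed to return the tpidr_el3-based per-CPU data
 * pointer, and cpu_ops_ptr is treated as a pointer-typed field):
 *
 *	void init_cpu_ops(void)
 *	{
 *		cpu_data_t *data = _cpu_data();
 *
 *		if (data->cpu_ops_ptr == NULL)
 *			data->cpu_ops_ptr = get_cpu_ops_ptr();
 *	}
 */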
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a
	 * matching cpu_ops structure entry is found, the corresponding
	 * cpu_reg_dump in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the cpu_ops list. Only the implementation and part number
	 * are used to match the entries.
	 * Return :
	 *	x0 - The matching cpu_ops pointer on success
	 *	x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using the mask */
	and	w2, w2, w3
1:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	error_exit

	/* Load the MIDR from the cpu_ops entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the MIDR matches this core's MIDR */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr
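
/*
 * The linear search above in C form (a sketch only; struct cpu_ops is a
 * hypothetical view of the records that declare_cpu_ops emits between the
 * linker symbols __CPU_OPS_START__ and __CPU_OPS_END__, each CPU_OPS_SIZE
 * bytes long with the MIDR word at offset CPU_MIDR):
 *
 *	struct cpu_ops *get_cpu_ops_ptr(void)
 *	{
 *		uint32_t midr = read_midr_el1() & CPU_IMPL_PN_MASK;
 *
 *		for (struct cpu_ops *ops = __CPU_OPS_START__;
 *		     ops < __CPU_OPS_END__; ops++)
 *			if ((ops->midr & CPU_IMPL_PN_MASK) == midr)
 *				return ops;
 *		return NULL;
 *	}
 */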

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
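
/*
 * Worked example (a sketch): for an r1p2 part, MIDR_EL1 has variant = 1
 * in bits [23:20] and revision = 2 in bits [3:0], so x0 is returned as
 * 0x12. In C terms, assuming 4-bit variant and revision fields:
 *
 *	rev_var = (((midr >> MIDR_VAR_SHIFT) & 0xf) << 4) | (midr & 0xf);
 */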

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the erratum applies; otherwise it does not.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, the erratum applies; otherwise it does not.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs
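
/*
 * Typical usage in C terms, treating the two helpers above as AAPCS
 * functions (a sketch only; the r1p2 cut-off value 0x12 and the
 * apply_workaround function are hypothetical):
 *
 *	// erratum applies to revisions up to and including r1p2
 *	if (cpu_rev_var_ls(cpu_get_rev_var(), 0x12) == ERRATA_APPLIES)
 *		apply_workaround();
 */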

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 * - with the MMU and data caches enabled;
 * - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data, so retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve the pointer to cpu_ops from per-CPU data and, from it,
	 * the errata printing function. If it's non-NULL, jump to it.
	 */
	mrs	x0, tpidr_el3
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to the errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
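
/*
 * The non-BL1 control flow above, in C terms (a sketch only; the field
 * names mirror the CPU_ERRATA_* offsets defined in cpu_macros.S and are
 * hypothetical):
 *
 *	void print_errata_status(void)
 *	{
 *		struct cpu_ops *ops = _cpu_data()->cpu_ops_ptr;
 *
 *		if (ops->errata_func != NULL &&
 *		    errata_needs_reporting(ops->errata_lock,
 *					   ops->errata_printed))
 *			ops->errata_func();	// br x1, a tail call
 *	}
 */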
#endif