blob: 47cb6a2deda956e60bd32abe3d7a7339dc62cff2 [file] [log] [blame]
/*
 * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
30
31#include <arch.h>
32#include <asm_macros.S>
33#include <assert_macros.S>
Masahiro Yamada441bfdd2016-12-25 23:36:24 +090034#ifdef IMAGE_BL31
Soby Mathewc704cbc2014-08-14 11:33:56 +010035#include <cpu_data.h>
36#endif
David Cunado1f5f8122017-01-17 14:40:15 +000037#include <cpu_macros.S>
Soby Mathew6b28c572016-03-21 10:36:47 +000038#include <debug.h>
Jeenu Viswambharand5ec3672017-01-03 11:01:51 +000039#include <errata_report.h>
Soby Mathewc704cbc2014-08-14 11:33:56 +010040
41 /* Reset fn is needed in BL at reset vector */
Masahiro Yamada441bfdd2016-12-25 23:36:24 +090042#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. Runs very early at the reset vector,
	 * so no stack is used; the return address is kept in x19.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	/* Preserve our return address across the calls below */
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (returned in x0) */
	bl	get_cpu_ops_ptr
#if ASM_ASSERTION
	/* A NULL cpu_ops pointer means no entry matched this core's MIDR */
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	/* Restore the original return address before dispatching */
	mov	x30, x19
	/* A CPU may legitimately provide no reset handler */
	cbz	x2, 1f

	/*
	 * Tail-call the cpu_ops reset handler; it returns directly to our
	 * caller via x30. It can clobber x0 - x19, x30.
	 */
	br	x2
1:
	ret
endfunc reset_handler
Soby Mathewf1785fd2014-08-14 12:22:32 +010073
Yatharth Kochar36433d12014-11-20 18:09:41 +000074#endif /* IMAGE_BL1 || IMAGE_BL31 */
Soby Mathewc704cbc2014-08-14 11:33:56 +010075
Masahiro Yamada441bfdd2016-12-25 23:36:24 +090076#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called (tail-called; it returns to our caller).
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level:
	 * x2 = min(x0, CPU_MAX_PWR_DWN_OPS - 1), unsigned compare.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	/* Fetch this core's cpu_ops pointer from per-CPU data (tpidr_el3) */
	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
	/* cpu_ops must have been initialized before this is called */
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/*
	 * Get the appropriate power down handler: the handler table starts at
	 * offset CPU_PWR_DWN_OPS within cpu_ops, with one 8-byte entry per
	 * power level (hence the lsl #3).
	 */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
	br	x1
endfunc prepare_cpu_pwr_dwn
Soby Mathew8e2f2872014-08-14 12:49:05 +0100108
109
110 /*
111 * Initializes the cpu_ops_ptr if not already initialized
Vikram Kanigiri9b38fc82015-01-29 18:27:38 +0000112 * in cpu_data. This can be called without a runtime stack, but may
113 * only be called after the MMU is enabled.
Soby Mathew8e2f2872014-08-14 12:49:05 +0100114 * clobbers: x0 - x6, x10
115 */
116 .globl init_cpu_ops
117func init_cpu_ops
118 mrs x6, tpidr_el3
119 ldr x0, [x6, #CPU_DATA_CPU_OPS_PTR]
120 cbnz x0, 1f
121 mov x10, x30
122 bl get_cpu_ops_ptr
123#if ASM_ASSERTION
124 cmp x0, #0
125 ASM_ASSERT(ne)
126#endif
Soby Mathew7d861ea2014-11-18 10:14:14 +0000127 str x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
Soby Mathew8e2f2872014-08-14 12:49:05 +0100128 mov x30, x10
1291:
130 ret
Kévin Petita877c252015-03-24 14:03:57 +0000131endfunc init_cpu_ops
Soby Mathew8e2f2872014-08-14 12:49:05 +0100132#endif /* IMAGE_BL31 */
133
Masahiro Yamada441bfdd2016-12-25 23:36:24 +0900134#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	/*
	 * Save the return address in x16 (NOTE(review): presumably chosen to
	 * avoid registers the crash-reporting path relies on — confirm
	 * against the crash reporting framework's register conventions).
	 */
	mov	x16, x30

	/* Get the matching cpu_ops pointer; NULL means nothing to dump */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump; a CPU may not provide one */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
Soby Mathew38b4bc92014-08-14 13:36:41 +0100157#endif
158
	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in cpu_ops entries. Only the implementation and part number
	 * are used to match the entries.
	 * Return :
	 *	x0 - The matching cpu_ops pointer on Success
	 *	x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/*
	 * Get the cpu_ops start and end locations. Both are biased by
	 * CPU_MIDR so the loop can load each entry's midr field directly.
	 */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter (0 = not found) */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/*
	 * Retain only the implementation and part number using mask.
	 * Only the low 32 bits (w registers) take part in the comparison.
	 */
	and	w2, w2, w3
1:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	error_exit

	/*
	 * Load the midr from the cpu_ops entry; the post-index addressing
	 * advances x4 to the midr field of the next entry in the same load.
	 */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches to midr of this core */
	cmp	w1, w2
	b.ne	1b

	/*
	 * Match found: undo the last post-increment and the CPU_MIDR bias
	 * to recover the start of the matching cpu_ops entry.
	 */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr
Soby Mathewc0884332014-09-22 12:11:36 +0100202
/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 *
 * Return: x0 = (variant << 4) | revision, all other bits zero.
 * Clobbers: x1.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
222
/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. Returns ERRATA_APPLIES in x0 when the
 * revision-variant is lower than or same as the given value, else
 * ERRATA_NOT_APPLIES.
 * Clobbers: x2, x3.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	x0, x1
	/* mov of an immediate does not disturb the flags set above */
	mov	x2, #ERRATA_NOT_APPLIES
	mov	x3, #ERRATA_APPLIES
	/* HI is the inverse of LS: pick NOT_APPLIES only when x0 > x1 */
	csel	x0, x2, x3, hi
	ret
endfunc cpu_rev_var_ls
236
/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. Returns ERRATA_APPLIES in x0 when the
 * revision-variant is higher than or same as the given value, else
 * ERRATA_NOT_APPLIES.
 * Clobbers: x2, x3.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	x0, x1
	/* mov of an immediate does not disturb the flags set above */
	mov	x2, #ERRATA_NOT_APPLIES
	mov	x3, #ERRATA_APPLIES
	/* LO is the inverse of HS: pick NOT_APPLIES only when x0 < x1 */
	csel	x0, x2, x3, lo
	ret
endfunc cpu_rev_var_hs
250
Jeenu Viswambharand5ec3672017-01-03 11:01:51 +0000251#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 * - with MMU and data caches are enabled;
 * - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly. xzr is stored alongside x30 only to keep the stack
	 * 16-byte aligned, as the AArch64 ABI requires for sp-based accesses.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	/* Jump to the CPU's errata reporting function, if it has one */
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 * Save x30 (for the call) and stash the errata function in
	 * callee-saved x8 so it survives errata_needs_reporting.
	 */
	stp	x8, x30, [sp, #-16]!
	mov	x8, x0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class. It returns non-zero in x0
	 * when reporting is required (inferred from the cbnz below).
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	/* Move the errata function into x1 before restoring x8 */
	mov	x1, x8
	ldp	x8, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/*
	 * Tail-call the errata reporting function for this CPU; it returns
	 * directly to our caller via x30.
	 */
	br	x1
endfunc print_errata_status
307#endif