/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
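	/*
	 * Stash the return address in r8: the bl calls below clobber lr and
	 * this handler does not use the stack.
	 */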
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
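	/*
	 * Restore the original return address. If a CPU-specific reset
	 * handler exists, branch to it; it returns straight to our caller.
	 */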
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler

#endif

#ifdef IMAGE_BL32 /* The core and cluster power down sequences are needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested power
	 * level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

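	/* Preserve the clamped power level across the _cpu_data call; it is popped back into r2 */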
	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
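	/* Each handler entry is a 32-bit pointer, so the offset is CPU_PWR_DWN_OPS + (level * 4) */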
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	bx	r1
endfunc prepare_cpu_pwr_dwn

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
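	/* Keep the cpu_data pointer in r6; it is needed again after get_cpu_ops_ptr */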
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */

	/*
	 * The function below returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR and finds the matching
	 * entry in the cpu_ops entries. Only the implementation and part number
	 * are used to match the entries.
	 * Return:
	 *	r0 - The matching cpu_ops pointer on success
	 *	r0 - 0 on failure.
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	r2, r2, r3
1:
	/* Check if we have reached end of list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the midr from the cpu_ops */
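	/* r4 points at the CPU_MIDR field of the current entry; the post-increment steps to the next entry */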
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the midr matches this core's midr */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr

/*
 * Extract CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them in
	 * r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0] retaining other bits.
	 */
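	/* For example, an r2p1 part (variant 2, revision 1) packs to 0x21 */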
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or same as a given
 * value, it indicates that the errata applies; otherwise not.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is higher than or same as a
 * given value, it indicates that the errata applies; otherwise not.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
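	/*
	 * Note: ge/lt are signed conditions; packed rev-var values fit in the
	 * low byte, so this behaves the same as an unsigned compare here.
	 */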
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
	/* r12 is pushed only for the sake of 8-byte stack alignment */
	push	{r4, r5, r12, lr}
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	bl	get_cpu_ops_ptr
	ldr	r0, [r0, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	blxne	r0
#else
	/*
	 * Retrieve the pointer to cpu_ops and, from it, the errata printing
	 * function. If it is non-NULL, jump to it.
	 */
	bl	_cpu_data
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	ldr	r0, [r1, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	beq	1f

	mov	r4, r0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	r0, [r1, #CPU_ERRATA_LOCK]
	ldr	r1, [r1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
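	/* A non-zero result means this core should do the reporting; call the printing function saved in r4 */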
	cmp	r0, #0
	blxne	r4
1:
#endif
	pop	{r4, r5, r12, pc}
endfunc print_errata_status
#endif