/*
 * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence, and it is assumed that we can clobber r0 - r10
	 * without the need to follow the AAPCS.
	 * Clobbers: r0 - r10
	 */
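	/*
	 * Note: the cpu_ops entries searched by get_cpu_ops_ptr below are
	 * emitted by the declare_cpu_ops macro (cpu_macros.S) for each CPU
	 * library file linked in, and are bounded by the linker symbols
	 * __CPU_OPS_START__ and __CPU_OPS_END__.
	 */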
	.globl	reset_handler
func reset_handler
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler

#endif

#ifdef IMAGE_BL32 /* Core and cluster power down is needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare the CPU power down function for all platforms. The function
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the highest implemented one
	 * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for the
	 * highest implemented level instead.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
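	/*
	 * r1 now holds the entry of the pwr_dwn_ops array (at offset
	 * CPU_PWR_DWN_OPS within cpu_ops) selected by the clamped power
	 * level; the "lsl #2" above scales the level by the size of a
	 * 32-bit function pointer.
	 */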
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	bx	r1
endfunc prepare_cpu_pwr_dwn

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
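	/* If cpu_ops_ptr is already populated for this CPU, skip the lookup */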
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */

	/*
	 * This function returns the cpu_ops structure matching the MIDR of the
	 * core. It reads the MIDR and looks for a matching entry in the list
	 * of cpu_ops entries. Only the implementer and part number fields are
	 * used for the match.
	 * Return:
	 *	r0 - The matching cpu_ops pointer on success
	 *	r0 - 0 on failure
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)
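	/*
	 * r4 and r5 address the midr field of the first entry and of the
	 * entry one past the last; the loop below advances r4 by
	 * CPU_OPS_SIZE per iteration, so it always points at the midr
	 * field of the entry being examined.
	 */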

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the midr from the cpu_ops entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the midr matches that of this core */
	cmp	r1, r2
	bne	1b

	/*
	 * The post-indexed load above advanced r4 by CPU_OPS_SIZE past the
	 * matching entry's midr field, so subtract both the increment and the
	 * CPU_MIDR offset to recover the cpu_ops base pointer.
	 */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr

/*
 * Extract the CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] into r0[7:0] and zero-fill the rest. Then
	 * insert r1[3:0] into r0[3:0], retaining the other bits.
	 */
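	/* For example, an r1p3 part (variant 1, revision 3) yields r0 = 0x13. */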
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the erratum applies; otherwise it does not.
 */
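/*
 * For example, with a cut-off of 0x12 (r1p2), revisions r0p0 up to r1p2 report
 * ERRATA_APPLIES, while r1p3 and later report ERRATA_NOT_APPLIES.
 */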
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is greater than or equal to
 * the given value, the erratum applies; otherwise it does not.
 */
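/*
 * For example, with a threshold of 0x20 (r2p0), revisions r2p0 and later
 * report ERRATA_APPLIES, while anything older reports ERRATA_NOT_APPLIES.
 */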
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print the errata status for CPUs of its class. Must be called
 * only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
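/*
 * Only one CPU of each class prints its errata report: the CPU_ERRATA_LOCK
 * spinlock and CPU_ERRATA_PRINTED flag consulted via errata_needs_reporting
 * below ensure the report is emitted at most once per class.
 */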
	.globl	print_errata_status
func print_errata_status
	/* r12 is pushed only for the sake of 8-byte stack alignment */
	push	{r4, r5, r12, lr}
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	bl	get_cpu_ops_ptr
	ldr	r0, [r0, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	blxne	r0
#else
	/*
	 * Retrieve the cpu_ops pointer and, from it, the errata printing
	 * function. If it is non-NULL, jump to it.
	 */
	bl	_cpu_data
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	ldr	r0, [r1, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	beq	1f

	mov	r4, r0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * the errata status pertaining to its class.
	 */
	ldr	r0, [r1, #CPU_ERRATA_LOCK]
	ldr	r1, [r1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	cmp	r0, #0
	blxne	r4
1:
#endif
	pop	{r4, r5, r12, pc}
endfunc print_errata_status
#endif