/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
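	/* Save lr: no stack is available this early and the calls below clobber it */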
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler
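
/*
 * Illustrative sketch (not part of this file's build): the cpu_ops entries
 * searched above are registered by each CPU library file via the
 * declare_cpu_ops macro from cpu_macros.S, along the lines of:
 *
 *	declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
 *		cortex_a32_reset_func, \
 *		cortex_a32_core_pwr_dwn, \
 *		cortex_a32_cluster_pwr_dwn
 */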

#endif

#ifdef IMAGE_BL32 /* The core and cluster power down functions are needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, call the
	 * power down handler for the highest implemented power level.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

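	/* Preserve the capped power level across the _cpu_data call */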
	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
	bx	r1
endfunc prepare_cpu_pwr_dwn
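
/*
 * Illustrative sketch (not part of this file's build): the PSCI power down
 * path invokes this helper with the deepest power level affected, e.g.:
 *
 *	mov	r0, #1			@ power down up to the cluster level
 *	bl	prepare_cpu_pwr_dwn
 */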

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
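	/* Cache the looked-up cpu_ops pointer in this core's cpu_data */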
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the cpu_ops entries. Only the implementation and part
	 * number are used to match the entries.
	 * Return:
	 *	r0 - The matching cpu_ops pointer on success
	 *	r0 - 0 on failure.
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the MIDR from the cpu_ops entry and step to the next entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the entry's MIDR matches the MIDR of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr
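
/*
 * Worked example: on a Cortex-A32 r0p1 core, MIDR reads 0x410fd011
 * (implementer 0x41, variant 0, architecture 0xf, part 0xd01, revision 1).
 * Masking with CPU_IMPL_PN_MASK (0xff00fff0) keeps 0x4100d010, so cores of
 * the same type match regardless of their revision and variant.
 */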

/*
 * Extract the CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * insert r1[3:0] into r0[3:0], retaining the other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var
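
/*
 * Worked example: for an r2p1 part (variant 2, revision 1) the MIDR has
 * bits[23:20] = 2 and bits[3:0] = 1, so cpu_get_rev_var returns 0x21.
 */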

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the erratum applies; otherwise it does not.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls
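
/*
 * Illustrative sketch (not part of this file's build): a typical check for an
 * erratum that applies up to r1p0 would be:
 *
 *	bl	cpu_get_rev_var
 *	mov	r1, #0x10		@ r1p0, packed as variant:revision
 *	bl	cpu_rev_var_ls		@ r0 == ERRATA_APPLIES for <= r1p0
 */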

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, the erratum applies; otherwise it does not.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
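	/*
	 * A signed compare is safe here: both values are packed
	 * variant:revision bytes in bits [7:0], so they are always positive.
	 */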
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print the errata status for CPUs of its class. Must be called
 * only:
 *
 * - with the MMU and data caches enabled;
 * - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
	/* r12 is pushed only for the sake of 8-byte stack alignment */
	push	{r4, r5, r12, lr}
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	bl	get_cpu_ops_ptr
	ldr	r0, [r0, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	blxne	r0
#else
	/*
	 * Retrieve the pointer to cpu_ops and, from it, the errata printing
	 * function. If it is non-NULL, branch to it.
	 */
	bl	_cpu_data
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	ldr	r0, [r1, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	beq	1f

	mov	r4, r0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	r0, [r1, #CPU_ERRATA_LOCK]
	ldr	r1, [r1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	cmp	r0, #0
	blxne	r4
1:
#endif
	pop	{r4, r5, r12, pc}
endfunc print_errata_status
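
/*
 * Note: this is typically invoked once during image initialization (e.g. from
 * the BL image's main function), after cpu_ops have been set up on the
 * calling CPU.
 */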
#endif