/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_data.h>
#include <cpu_macros.S>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence, and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
	mov	r10, lr

	/* The plat_reset_handler can clobber r0 - r9 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r10
	bxne	r1
	bx	lr
endfunc reset_handler
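
	/*
	 * Illustrative only: the CPU_RESET_FUNC slot read above is populated
	 * by each CPU library file through the declare_cpu_ops macro from
	 * cpu_macros.S. Taking Cortex-A53 as an example, a CPU file would
	 * register its handlers along these lines (handler names are an
	 * assumption for illustration, not a definitive list):
	 *
	 *	declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
	 *		cortex_a53_reset_func, \
	 *		cortex_a53_core_pwr_dwn, \
	 *		cortex_a53_cluster_pwr_dwn
	 */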

#endif

#ifdef IMAGE_BL32 /* The CPU and cluster power down functions are needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * CPU power down preparation function, common to all platforms. It
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the highest supported one
	 * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for the
	 * highest supported level instead.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
	bx	r1
endfunc prepare_cpu_pwr_dwn
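
	/*
	 * The handler lookup above is, in effect (illustrative pseudo-C,
	 * where CPU_PWR_DWN_OPS is the byte offset of the handler array
	 * inside cpu_ops and handlers are 4-byte pointers on AArch32):
	 *
	 *	handler = *(cpu_ops_ptr + CPU_PWR_DWN_OPS + (level << 2));
	 *
	 * With the usual CPU_MAX_PWR_DWN_OPS of 2, level 0 selects the core
	 * power down handler and level 1 the cluster power down handler.
	 */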
	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops
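
	/*
	 * Rough C equivalent of init_cpu_ops (illustrative only; cpu_ops_ptr
	 * names the per-CPU field behind the CPU_DATA_CPU_OPS_PTR offset):
	 *
	 *	if (_cpu_data()->cpu_ops_ptr == NULL)
	 *		_cpu_data()->cpu_ops_ptr = get_cpu_ops_ptr();
	 */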

#endif /* IMAGE_BL32 */

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the cpu_ops list. Only the implementation and part number
	 * fields are used for the match.
	 * Return:
	 *	r0 - The matching cpu_ops pointer on success
	 *	r0 - 0 on failure
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the midr from the cpu_ops entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the midr matches that of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr
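
/*
 * Rough C equivalent of the search above (illustrative only; the linker
 * script collects all cpu_ops records between __CPU_OPS_START__ and
 * __CPU_OPS_END__, and midr_of() is a made-up accessor for the entry's
 * CPU_MIDR field):
 *
 *	for (ops = __CPU_OPS_START__; ops < __CPU_OPS_END__;
 *			ops += CPU_OPS_SIZE)
 *		if ((midr_of(ops) & CPU_IMPL_PN_MASK) ==
 *				(MIDR & CPU_IMPL_PN_MASK))
 *			return ops;
 *	return 0;
 */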

/*
 * Extract the CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * in r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0] retaining the other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var
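
/*
 * Worked example: for an r2p1 part, MIDR holds variant = 0x2 (bits [23:20])
 * and revision = 0x1 (bits [3:0]), so cpu_get_rev_var returns 0x21.
 */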

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. Returns ERRATA_APPLIES in r0 if the revision-variant
 * is less than or the same as the given value, and ERRATA_NOT_APPLIES
 * otherwise.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls
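
/*
 * Typical use from a CPU library file: a per-erratum check routine receives
 * the packed revision-variant in r0, loads the last affected revision-variant
 * into r1 and tail-calls cpu_rev_var_ls. A sketch for a hypothetical erratum
 * affecting r0p2 and earlier (the erratum number and label are made up for
 * illustration):
 *
 *	func check_errata_xxxxxx
 *		mov	r1, #0x02
 *		b	cpu_rev_var_ls
 *	endfunc check_errata_xxxxxx
 */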

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. Returns ERRATA_APPLIES in r0 if the revision-variant
 * is higher than or the same as the given value, and ERRATA_NOT_APPLIES
 * otherwise.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	/* Unsigned comparison, consistent with cpu_rev_var_ls above */
	cmp	r0, r1
	movhs	r0, #ERRATA_APPLIES
	movlo	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs
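
/*
 * C-level view of the two comparators above (illustrative only):
 *
 *	cpu_rev_var_ls(rev_var, v) == ERRATA_APPLIES  iff  rev_var <= v
 *	cpu_rev_var_hs(rev_var, v) == ERRATA_APPLIES  iff  rev_var >= v
 */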

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print the errata status for CPUs of its class. Must be called
 * only:
 *
 * - with the MMU and data caches enabled;
 * - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
	/* r12 is pushed only for the sake of 8-byte stack alignment */
	push	{r4, r5, r12, lr}
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	bl	get_cpu_ops_ptr
	ldr	r0, [r0, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	blxne	r0
#else
	/*
	 * Retrieve the pointer to cpu_ops and, from it, the errata printing
	 * function. If the function is non-NULL, jump to it.
	 */
	bl	_cpu_data
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	ldr	r0, [r1, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	beq	1f

	mov	r4, r0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	r0, [r1, #CPU_ERRATA_LOCK]
	ldr	r1, [r1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	cmp	r0, #0
	blxne	r4
1:
#endif
	pop	{r4, r5, r12, pc}
endfunc print_errata_status
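
/*
 * errata_needs_reporting is implemented in C (lib/cpus/errata_report.c). It
 * roughly takes the per-class lock, tests and sets the "printed" flag, and
 * returns non-zero only for the first CPU of the class to reach it, so the
 * errata report is printed once per CPU class rather than once per core.
 */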
#endif /* REPORT_ERRATA */