/*
 * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/cpus/cpu_ops.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
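	/*
	 * Illustrative C-like sketch of the flow below (not part of the
	 * build; the field name is indicative of the cpu_ops layout):
	 *
	 *   plat_reset_handler();
	 *   ops = get_cpu_ops_ptr();
	 *   assert(ops != NULL);
	 *   if (ops->reset_func != NULL)
	 *           ops->reset_func();    // tail call, returns to original lr
	 */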
	.globl	reset_handler
func reset_handler
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler

#endif

#ifdef IMAGE_BL32 /* Core and cluster power down handling is needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepares the CPU for power down, for all platforms. The function
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
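	/*
	 * Roughly equivalent C (illustrative only; CPU_PWR_DWN_OPS is the
	 * offset of the array of power down handlers within cpu_ops):
	 *
	 *   if (power_level > CPU_MAX_PWR_DWN_OPS - 1)
	 *           power_level = CPU_MAX_PWR_DWN_OPS - 1;
	 *   ops = _cpu_data()->cpu_ops_ptr;
	 *   ops->pwr_dwn_ops[power_level]();    // tail call via bx r1
	 */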
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
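	/*
	 * For example, if CPU_MAX_PWR_DWN_OPS were 2 (core- and cluster-level
	 * handlers), a request for power level 3 would be clamped to 1, i.e.
	 * the last (cluster) handler. The value 2 is illustrative only.
	 */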
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	bx	r1
endfunc prepare_cpu_pwr_dwn

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
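	/*
	 * Illustrative C-like sketch (not part of the build; the field name
	 * is indicative of the cpu_ops_ptr slot in cpu_data):
	 *
	 *   data = _cpu_data();
	 *   if (data->cpu_ops_ptr == 0) {
	 *           data->cpu_ops_ptr = get_cpu_ops_ptr();
	 *           assert(data->cpu_ops_ptr != 0);
	 *   }
	 */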
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementation
	 * and part number are used to match the entries.
	 * Return :
	 *	r0 - The matching cpu_ops pointer on success
	 *	r0 - 0 on failure
	 * Clobbers: r0 - r5
	 */
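	/*
	 * Illustrative C-like sketch of the search below (not part of the
	 * build; the cpu_ops entries sit between the __CPU_OPS_START__ and
	 * __CPU_OPS_END__ linker symbols):
	 *
	 *   midr = read_midr() & CPU_IMPL_PN_MASK;
	 *   for (entry = __CPU_OPS_START__; entry < __CPU_OPS_END__;
	 *        entry += CPU_OPS_SIZE)
	 *           if ((entry->midr & CPU_IMPL_PN_MASK) == midr)
	 *                   return entry;
	 *   return NULL;
	 */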
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the midr from the cpu_ops entry and advance to the next one */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the midr matches the midr of this core */
	cmp	r1, r2
	bne	1b

	/*
	 * r4 now points CPU_OPS_SIZE bytes past the CPU_MIDR field of the
	 * matching entry; subtract the increment and offset to get the
	 * cpu_ops pointer.
	 */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] fields from r1 and
	 * pack them into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0] retaining other bits.
	 */
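	/* For example, an r1p2 part (variant 1, revision 2) yields r0 = 0x12. */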
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the erratum applies; otherwise it does not.
 */
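/*
 * Typical usage from an errata check (illustrative only; 0x12 stands for a
 * hypothetical erratum present up to and including r1p2):
 *
 *	bl	cpu_get_rev_var
 *	mov	r1, #0x12
 *	bl	cpu_rev_var_ls
 *	cmp	r0, #ERRATA_APPLIES
 */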
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is greater than or equal to
 * the given value, the erratum applies; otherwise it does not.
 */
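/*
 * Counterpart of cpu_rev_var_ls, used for errata that apply from a given
 * revision-variant onwards, e.g. r1 = 0x10 for an erratum introduced in r1p0
 * (illustrative value only).
 */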
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs