/*
 * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <plat_startup.h>

/*
 * HandoffParams
 * Parameter     bitfield   encoding
 * -----------------------------------------------------------------------------
 * Exec State    0          0 -> AArch64, 1 -> AArch32
 * endianness    1          0 -> LE, 1 -> BE
 * secure (TZ)   2          0 -> Non-secure, 1 -> secure
 * EL            3:4        00 -> EL0, 01 -> EL1, 10 -> EL2, 11 -> EL3
 * CPU#          5:6        00 -> A53_0, 01 -> A53_1, 10 -> A53_2, 11 -> A53_3
 * Reserved      7:10       Reserved
 * Cluster#      11:12      00 -> Cluster 0, 01 -> Cluster 1, 10 -> Cluster 2,
 *                          11 -> Cluster 3 (applicable to Versal NET only)
 * Reserved      13:16      Reserved
 */

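/*
 * Worked example of the encoding above (the value itself is hypothetical):
 * flags = 0x0d (0b01101) decodes as bit 0 = 1 (AArch32), bit 1 = 0 (LE),
 * bit 2 = 1 (secure), bits 3:4 = 01 (EL1) and bits 5:6 = 00 (A53_0), i.e.
 * a secure AArch32 partition entered at EL1 on CPU A53_0.
 */
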
#define XBL_FLAGS_ESTATE_SHIFT          0U
#define XBL_FLAGS_ESTATE_MASK           (1U << XBL_FLAGS_ESTATE_SHIFT)
#define XBL_FLAGS_ESTATE_A64            0U
#define XBL_FLAGS_ESTATE_A32            1U

#define XBL_FLAGS_ENDIAN_SHIFT          1U
#define XBL_FLAGS_ENDIAN_MASK           (1U << XBL_FLAGS_ENDIAN_SHIFT)
#define XBL_FLAGS_ENDIAN_LE             0U
#define XBL_FLAGS_ENDIAN_BE             1U

#define XBL_FLAGS_TZ_SHIFT              2U
#define XBL_FLAGS_TZ_MASK               (1U << XBL_FLAGS_TZ_SHIFT)
#define XBL_FLAGS_NON_SECURE            0U
#define XBL_FLAGS_SECURE                1U

#define XBL_FLAGS_EL_SHIFT              3U
#define XBL_FLAGS_EL_MASK               (3U << XBL_FLAGS_EL_SHIFT)
#define XBL_FLAGS_EL0                   0U
#define XBL_FLAGS_EL1                   1U
#define XBL_FLAGS_EL2                   2U
#define XBL_FLAGS_EL3                   3U

#define XBL_FLAGS_CPU_SHIFT             5U
#define XBL_FLAGS_CPU_MASK              (3U << XBL_FLAGS_CPU_SHIFT)
#define XBL_FLAGS_A53_0                 0U
#define XBL_FLAGS_A53_1                 1U
#define XBL_FLAGS_A53_2                 2U
#define XBL_FLAGS_A53_3                 3U

#if defined(PLAT_versal_net)
#define XBL_FLAGS_CLUSTER_SHIFT         11U
/* GENMASK(high, low): the cluster number lives in flags bits 12:11. */
#define XBL_FLAGS_CLUSTER_MASK          GENMASK(12, 11)

#define XBL_FLAGS_CLUSTER_0             0U
#endif /* PLAT_versal_net */

/**
 * get_xbl_cpu() - Get the target CPU for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_A53_0, XBL_FLAGS_A53_1, XBL_FLAGS_A53_2 or XBL_FLAGS_A53_3.
 */
static uint32_t get_xbl_cpu(const struct xbl_partition *partition)
{
        uint64_t flags = partition->flags & XBL_FLAGS_CPU_MASK;

        flags >>= XBL_FLAGS_CPU_SHIFT;

        return (uint32_t)flags;
}

/**
 * get_xbl_el() - Get the target exception level for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_EL0, XBL_FLAGS_EL1, XBL_FLAGS_EL2 or XBL_FLAGS_EL3.
 */
static uint32_t get_xbl_el(const struct xbl_partition *partition)
{
        uint64_t flags = partition->flags & XBL_FLAGS_EL_MASK;

        flags >>= XBL_FLAGS_EL_SHIFT;

        return (uint32_t)flags;
}

/**
 * get_xbl_ss() - Get the target security state for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_NON_SECURE or XBL_FLAGS_SECURE.
 */
static uint32_t get_xbl_ss(const struct xbl_partition *partition)
{
        uint64_t flags = partition->flags & XBL_FLAGS_TZ_MASK;

        flags >>= XBL_FLAGS_TZ_SHIFT;

        return (uint32_t)flags;
}

/**
 * get_xbl_endian() - Get the target endianness for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: SPSR_E_LITTLE or SPSR_E_BIG.
 */
static uint32_t get_xbl_endian(const struct xbl_partition *partition)
{
        uint64_t flags = partition->flags & XBL_FLAGS_ENDIAN_MASK;

        flags >>= XBL_FLAGS_ENDIAN_SHIFT;

        if (flags == XBL_FLAGS_ENDIAN_BE) {
                return SPSR_E_BIG;
        } else {
                return SPSR_E_LITTLE;
        }
}

/**
 * get_xbl_estate() - Get the target execution state for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_ESTATE_A32 or XBL_FLAGS_ESTATE_A64.
 */
static uint32_t get_xbl_estate(const struct xbl_partition *partition)
{
        uint64_t flags = partition->flags & XBL_FLAGS_ESTATE_MASK;

        flags >>= XBL_FLAGS_ESTATE_SHIFT;

        return (uint32_t)flags;
}

#if defined(PLAT_versal_net)
/**
 * get_xbl_cluster() - Get the cluster number.
 * @partition: Pointer to partition struct.
 *
 * Return: Cluster number for the partition.
 */
static uint32_t get_xbl_cluster(const struct xbl_partition *partition)
{
        uint64_t flags = partition->flags & XBL_FLAGS_CLUSTER_MASK;

        return (uint32_t)(flags >> XBL_FLAGS_CLUSTER_SHIFT);
}
#endif /* PLAT_versal_net */

/**
 * xbl_handover() - Populate the bl32 and bl33 image info structures.
 * @bl32: BL32 image info structure.
 * @bl33: BL33 image info structure.
 * @handoff_addr: TF-A handoff address.
 *
 * Process the handoff parameters from the XBL and populate the BL32 and BL33
 * image info structures accordingly.
 *
 * Return: The status of the handoff; a value from the xbl_handoff enum.
 */
enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
                              entry_point_info_t *bl33,
                              uint64_t handoff_addr)
{
        const struct xbl_handoff_params *HandoffParams;

        if (handoff_addr == 0U) {
                WARN("BL31: No handoff structure passed\n");
                return XBL_HANDOFF_NO_STRUCT;
        }

        HandoffParams = (struct xbl_handoff_params *)handoff_addr;
        /* A valid handoff structure must start with the ASCII magic "XLNX". */
        if ((HandoffParams->magic[0] != (uint8_t)'X') ||
            (HandoffParams->magic[1] != (uint8_t)'L') ||
            (HandoffParams->magic[2] != (uint8_t)'N') ||
            (HandoffParams->magic[3] != (uint8_t)'X')) {
                ERROR("BL31: invalid handoff structure at %" PRIx64 "\n", handoff_addr);
                return XBL_HANDOFF_INVAL_STRUCT;
        }

        VERBOSE("BL31: TF-A handoff params at:0x%" PRIx64 ", entries:%u\n",
                handoff_addr, HandoffParams->num_entries);
        if (HandoffParams->num_entries > XBL_MAX_PARTITIONS) {
                ERROR("BL31: TF-A handoff params: too many partitions (%u/%u)\n",
                      HandoffParams->num_entries, XBL_MAX_PARTITIONS);
                return XBL_HANDOFF_TOO_MANY_PARTS;
        }

        /*
         * We loop over all passed entries but only populate two image
         * structs (bl32, bl33), i.e. the last applicable image of each
         * type in the handoff structure is used for the hand-off.
         */
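        /*
         * Illustration (hypothetical): if the structure carries two valid
         * non-secure entries, both pass the checks below and the second
         * entry's values overwrite the bl33 settings taken from the first.
         */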
        for (size_t i = 0; i < HandoffParams->num_entries; i++) {
                entry_point_info_t *image;
                uint32_t target_estate, target_secure, target_cpu;
                uint32_t target_endianness, target_el;

                VERBOSE("BL31: %zu: entry:0x%" PRIx64 ", flags:0x%" PRIx64 "\n", i,
                        HandoffParams->partition[i].entry_point,
                        HandoffParams->partition[i].flags);

#if defined(PLAT_versal_net)
                uint32_t target_cluster;

                target_cluster = get_xbl_cluster(&HandoffParams->partition[i]);
                if (target_cluster != XBL_FLAGS_CLUSTER_0) {
                        WARN("BL31: invalid target Cluster (%i)\n",
                             target_cluster);
                        continue;
                }
#endif /* PLAT_versal_net */

                target_cpu = get_xbl_cpu(&HandoffParams->partition[i]);
                if (target_cpu != XBL_FLAGS_A53_0) {
                        WARN("BL31: invalid target CPU (%i)\n", target_cpu);
                        continue;
                }

                target_el = get_xbl_el(&HandoffParams->partition[i]);
                if ((target_el == XBL_FLAGS_EL3) ||
                    (target_el == XBL_FLAGS_EL0)) {
                        WARN("BL31: invalid target exception level (%i)\n",
                             target_el);
                        continue;
                }

                target_secure = get_xbl_ss(&HandoffParams->partition[i]);
                if ((target_secure == XBL_FLAGS_SECURE) &&
                    (target_el == XBL_FLAGS_EL2)) {
                        WARN("BL31: invalid security state (%i) for exception level (%i)\n",
                             target_secure, target_el);
                        continue;
                }

                target_estate = get_xbl_estate(&HandoffParams->partition[i]);
                target_endianness = get_xbl_endian(&HandoffParams->partition[i]);

                /* Secure partitions are handed off as BL32, all others as BL33. */
                if (target_secure == XBL_FLAGS_SECURE) {
                        image = bl32;

                        if (target_estate == XBL_FLAGS_ESTATE_A32) {
                                bl32->spsr = (uint32_t)SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
                                                                   (uint64_t)target_endianness,
                                                                   DISABLE_ALL_EXCEPTIONS);
                        } else {
                                bl32->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
                                                     DISABLE_ALL_EXCEPTIONS);
                        }
                } else {
                        image = bl33;

                        if (target_estate == XBL_FLAGS_ESTATE_A32) {
                                if (target_el == XBL_FLAGS_EL2) {
                                        target_el = MODE32_hyp;
                                } else {
                                        target_el = MODE32_sys;
                                }

                                bl33->spsr = (uint32_t)SPSR_MODE32((uint64_t)target_el, SPSR_T_ARM,
                                                                   (uint64_t)target_endianness,
                                                                   DISABLE_ALL_EXCEPTIONS);
                        } else {
                                if (target_el == XBL_FLAGS_EL2) {
                                        target_el = MODE_EL2;
                                } else {
                                        target_el = MODE_EL1;
                                }

                                bl33->spsr = (uint32_t)SPSR_64((uint64_t)target_el, MODE_SP_ELX,
                                                               DISABLE_ALL_EXCEPTIONS);
                        }
                }

                VERBOSE("Setting up %s entry point to:%" PRIx64 ", el:%x\n",
                        (target_secure == XBL_FLAGS_SECURE) ? "BL32" : "BL33",
                        HandoffParams->partition[i].entry_point,
                        target_el);
                image->pc = HandoffParams->partition[i].entry_point;

                if (target_endianness == SPSR_E_BIG) {
                        EP_SET_EE(image->h.attr, EP_EE_BIG);
                } else {
                        EP_SET_EE(image->h.attr, EP_EE_LITTLE);
                }
        }

        return XBL_HANDOFF_SUCCESS;
}
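
/*
 * Usage sketch (hypothetical, for illustration only). A platform's early BL31
 * setup is expected to call xbl_handover() roughly as below; the symbol names
 * bl32_image_ep_info, bl33_image_ep_info and PLAT_HANDOFF_ADDR are assumptions
 * made for this sketch, not definitions from this file:
 *
 *      static entry_point_info_t bl32_image_ep_info;
 *      static entry_point_info_t bl33_image_ep_info;
 *
 *      void example_bl31_early_setup(void)
 *      {
 *              enum xbl_handoff ret;
 *
 *              ret = xbl_handover(&bl32_image_ep_info, &bl33_image_ep_info,
 *                                 PLAT_HANDOFF_ADDR);
 *              if (ret != XBL_HANDOFF_SUCCESS) {
 *                      WARN("BL31: handoff failed (%d)\n", ret);
 *              }
 *      }
 */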