/*
 * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <plat_startup.h>

/*
 * HandoffParams
 * Parameter	bitfield	encoding
 * -----------------------------------------------------------------------------
 * Exec State	0		0 -> AArch64, 1 -> AArch32
 * Endianness	1		0 -> LE, 1 -> BE
 * Secure (TZ)	2		0 -> Non-secure, 1 -> Secure
 * EL		3:4		00 -> EL0, 01 -> EL1, 10 -> EL2, 11 -> EL3
 * CPU#		5:6		00 -> A53_0, 01 -> A53_1, 10 -> A53_2, 11 -> A53_3
 * Reserved	7:10		Reserved
 * Cluster#	11:12		00 -> Cluster 0, 01 -> Cluster 1, 10 -> Cluster 2,
 *				11 -> Cluster 3 (applicable to Versal NET only)
 * Reserved	13:16		Reserved
 */
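
/*
 * Worked example (an illustrative value, not taken from any particular boot
 * image): flags == 0x08 sets only bits 3:4 to 01, which the table above
 * decodes as AArch64, little-endian, non-secure, EL1, CPU A53_0, cluster 0.
 */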

#define XBL_FLAGS_ESTATE_SHIFT		0U
#define XBL_FLAGS_ESTATE_MASK		(1U << XBL_FLAGS_ESTATE_SHIFT)
#define XBL_FLAGS_ESTATE_A64		0U
#define XBL_FLAGS_ESTATE_A32		1U

#define XBL_FLAGS_ENDIAN_SHIFT		1U
#define XBL_FLAGS_ENDIAN_MASK		(1U << XBL_FLAGS_ENDIAN_SHIFT)
#define XBL_FLAGS_ENDIAN_LE		0U
#define XBL_FLAGS_ENDIAN_BE		1U

#define XBL_FLAGS_TZ_SHIFT		2U
#define XBL_FLAGS_TZ_MASK		(1U << XBL_FLAGS_TZ_SHIFT)
#define XBL_FLAGS_NON_SECURE		0U
#define XBL_FLAGS_SECURE		1U

#define XBL_FLAGS_EL_SHIFT		3U
#define XBL_FLAGS_EL_MASK		(3U << XBL_FLAGS_EL_SHIFT)
#define XBL_FLAGS_EL0			0U
#define XBL_FLAGS_EL1			1U
#define XBL_FLAGS_EL2			2U
#define XBL_FLAGS_EL3			3U

#define XBL_FLAGS_CPU_SHIFT		5U
#define XBL_FLAGS_CPU_MASK		(3U << XBL_FLAGS_CPU_SHIFT)
#define XBL_FLAGS_A53_0			0U
#define XBL_FLAGS_A53_1			1U
#define XBL_FLAGS_A53_2			2U
#define XBL_FLAGS_A53_3			3U

#if defined(PLAT_versal_net)
#define XBL_FLAGS_CLUSTER_SHIFT		11U
#define XBL_FLAGS_CLUSTER_MASK		GENMASK(12, 11)

#define XBL_FLAGS_CLUSTER_0		0U
#endif /* PLAT_versal_net */
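
/*
 * Note: GENMASK() is assumed here to follow the TF-A utils_def.h convention
 * of GENMASK(high, low), so GENMASK(12, 11) yields 0x1800 (bits 11 and 12),
 * matching the Cluster# field in the encoding table above.
 */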

/**
 * get_xbl_cpu() - Get the target CPU for the partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_A53_0, XBL_FLAGS_A53_1, XBL_FLAGS_A53_2 or XBL_FLAGS_A53_3.
 */
static int32_t get_xbl_cpu(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_CPU_MASK;

	return (int32_t)(flags >> XBL_FLAGS_CPU_SHIFT);
}

/**
 * get_xbl_el() - Get the target exception level for the partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_EL0, XBL_FLAGS_EL1, XBL_FLAGS_EL2 or XBL_FLAGS_EL3.
 */
static int32_t get_xbl_el(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_EL_MASK;

	return (int32_t)(flags >> XBL_FLAGS_EL_SHIFT);
}

/**
 * get_xbl_ss() - Get the target security state for the partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_NON_SECURE or XBL_FLAGS_SECURE.
 */
static int32_t get_xbl_ss(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_TZ_MASK;

	return (int32_t)(flags >> XBL_FLAGS_TZ_SHIFT);
}

/**
 * get_xbl_endian() - Get the target endianness for the partition.
 * @partition: Pointer to partition struct.
 *
 * Return: SPSR_E_LITTLE or SPSR_E_BIG.
 */
static int32_t get_xbl_endian(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_ENDIAN_MASK;

	flags >>= XBL_FLAGS_ENDIAN_SHIFT;

	if (flags == XBL_FLAGS_ENDIAN_BE) {
		return SPSR_E_BIG;
	} else {
		return SPSR_E_LITTLE;
	}
}

/**
 * get_xbl_estate() - Get the target execution state for the partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_ESTATE_A32 or XBL_FLAGS_ESTATE_A64.
 */
static int32_t get_xbl_estate(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_ESTATE_MASK;

	return (int32_t)(flags >> XBL_FLAGS_ESTATE_SHIFT);
}

#if defined(PLAT_versal_net)
/**
 * get_xbl_cluster() - Get the cluster number for the partition.
 * @partition: Pointer to partition struct.
 *
 * Return: Cluster number for the partition.
 */
static int32_t get_xbl_cluster(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_CLUSTER_MASK;

	return (int32_t)(flags >> XBL_FLAGS_CLUSTER_SHIFT);
}
#endif /* PLAT_versal_net */
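
/*
 * Shape of the handoff structure consumed below (a sketch inferred from how
 * the fields are accessed in this file; the authoritative definitions live
 * in plat_startup.h). The magic bytes are expected to spell "XLNX":
 *
 *	struct xbl_partition {
 *		uint64_t entry_point;
 *		uint64_t flags;
 *	};
 *
 *	struct xbl_handoff_params {
 *		uint8_t magic[4];
 *		uint32_t num_entries;
 *		struct xbl_partition partition[XBL_MAX_PARTITIONS];
 *	};
 */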

/**
 * xbl_handover() - Populates the bl32 and bl33 image info structures.
 * @bl32: BL32 image info structure.
 * @bl33: BL33 image info structure.
 * @handoff_addr: TF-A handoff address.
 *
 * Process the handoff parameters from the XBL and populate the BL32 and BL33
 * image info structures accordingly.
 *
 * Return: Status of the handoff; a value from the xbl_handoff enum.
 */
enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
			      entry_point_info_t *bl33,
			      uint64_t handoff_addr)
{
	const struct xbl_handoff_params *HandoffParams;

	if (!handoff_addr) {
		WARN("BL31: No handoff structure passed\n");
		return XBL_HANDOFF_NO_STRUCT;
	}

	HandoffParams = (struct xbl_handoff_params *)handoff_addr;
	if ((HandoffParams->magic[0] != 'X') ||
	    (HandoffParams->magic[1] != 'L') ||
	    (HandoffParams->magic[2] != 'N') ||
	    (HandoffParams->magic[3] != 'X')) {
		ERROR("BL31: invalid handoff structure at %" PRIx64 "\n", handoff_addr);
		return XBL_HANDOFF_INVAL_STRUCT;
	}

	VERBOSE("BL31: TF-A handoff params at:0x%" PRIx64 ", entries:%u\n",
		handoff_addr, HandoffParams->num_entries);
	if (HandoffParams->num_entries > XBL_MAX_PARTITIONS) {
		ERROR("BL31: TF-A handoff params: too many partitions (%u/%u)\n",
		      HandoffParams->num_entries, XBL_MAX_PARTITIONS);
		return XBL_HANDOFF_TOO_MANY_PARTS;
	}

	/*
	 * We loop over all passed entries but only populate two image structs
	 * (bl32, bl33), i.e. the last applicable images in the handoff
	 * structure will be used for the handoff.
	 */
	for (size_t i = 0; i < HandoffParams->num_entries; i++) {
		entry_point_info_t *image;
		int32_t target_estate, target_secure, target_cpu;
		uint32_t target_endianness, target_el;

		VERBOSE("BL31: %zu: entry:0x%" PRIx64 ", flags:0x%" PRIx64 "\n", i,
			HandoffParams->partition[i].entry_point,
			HandoffParams->partition[i].flags);

#if defined(PLAT_versal_net)
		uint32_t target_cluster;

		target_cluster = get_xbl_cluster(&HandoffParams->partition[i]);
		if (target_cluster != XBL_FLAGS_CLUSTER_0) {
			WARN("BL31: invalid target cluster (%i)\n",
			     target_cluster);
			continue;
		}
#endif /* PLAT_versal_net */

		target_cpu = get_xbl_cpu(&HandoffParams->partition[i]);
		if (target_cpu != XBL_FLAGS_A53_0) {
			WARN("BL31: invalid target CPU (%i)\n", target_cpu);
			continue;
		}

		target_el = get_xbl_el(&HandoffParams->partition[i]);
		if ((target_el == XBL_FLAGS_EL3) ||
		    (target_el == XBL_FLAGS_EL0)) {
			WARN("BL31: invalid target exception level (%i)\n",
			     target_el);
			continue;
		}

		target_secure = get_xbl_ss(&HandoffParams->partition[i]);
		if (target_secure == XBL_FLAGS_SECURE &&
		    target_el == XBL_FLAGS_EL2) {
			WARN("BL31: invalid security state (%i) for exception level (%i)\n",
			     target_secure, target_el);
			continue;
		}

		target_estate = get_xbl_estate(&HandoffParams->partition[i]);
		target_endianness = get_xbl_endian(&HandoffParams->partition[i]);

		/* Secure partitions go to BL32, non-secure ones to BL33. */
		if (target_secure == XBL_FLAGS_SECURE) {
			image = bl32;

			if (target_estate == XBL_FLAGS_ESTATE_A32) {
				bl32->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
							 target_endianness,
							 DISABLE_ALL_EXCEPTIONS);
			} else {
				bl32->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
						     DISABLE_ALL_EXCEPTIONS);
			}
		} else {
			image = bl33;

			if (target_estate == XBL_FLAGS_ESTATE_A32) {
				if (target_el == XBL_FLAGS_EL2) {
					target_el = MODE32_hyp;
				} else {
					target_el = MODE32_sys;
				}

				bl33->spsr = SPSR_MODE32(target_el, SPSR_T_ARM,
							 target_endianness,
							 DISABLE_ALL_EXCEPTIONS);
			} else {
				if (target_el == XBL_FLAGS_EL2) {
					target_el = MODE_EL2;
				} else {
					target_el = MODE_EL1;
				}

				bl33->spsr = SPSR_64(target_el, MODE_SP_ELX,
						     DISABLE_ALL_EXCEPTIONS);
			}
		}

		VERBOSE("Setting up %s entry point to:%" PRIx64 ", el:%x\n",
			target_secure == XBL_FLAGS_SECURE ? "BL32" : "BL33",
			HandoffParams->partition[i].entry_point,
			target_el);
		image->pc = HandoffParams->partition[i].entry_point;

		if (target_endianness == SPSR_E_BIG) {
			EP_SET_EE(image->h.attr, EP_EE_BIG);
		} else {
			EP_SET_EE(image->h.attr, EP_EE_LITTLE);
		}
	}

	return XBL_HANDOFF_SUCCESS;
}
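
/*
 * Typical call site (a sketch, assuming the usual TF-A early platform setup
 * flow on Xilinx platforms; the entry-point globals named here are
 * illustrative, not taken from this file):
 *
 *	enum xbl_handoff ret;
 *
 *	ret = xbl_handover(&bl32_image_ep_info, &bl33_image_ep_info,
 *			   handoff_addr);
 *	if (ret != XBL_HANDOFF_SUCCESS) {
 *		WARN("BL31: handoff failed (%d), keeping defaults\n", ret);
 *		// fall back to the platform's build-time entry points
 *	}
 */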