/*
 * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <plat_startup.h>

/*
 * HandoffParams
 * Parameter bitfield encoding
 * -----------------------------------------------------------------------------
 * Exec State	0	0 -> AArch64, 1 -> AArch32
 * Endianness	1	0 -> LE, 1 -> BE
 * Secure (TZ)	2	0 -> Non-secure, 1 -> Secure
 * EL		3:4	00 -> EL0, 01 -> EL1, 10 -> EL2, 11 -> EL3
 * CPU#		5:6	00 -> A53_0, 01 -> A53_1, 10 -> A53_2, 11 -> A53_3
 */
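
/*
 * Worked example (illustrative value, not taken from a real boot flow):
 * flags = 0x10 decodes as bit 0 = 0 (AArch64), bit 1 = 0 (LE),
 * bit 2 = 0 (non-secure), bits 3:4 = 0b10 (EL2) and bits 5:6 = 0b00
 * (A53_0), i.e. a typical non-secure BL33 image.
 */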

#define XBL_FLAGS_ESTATE_SHIFT		0U
#define XBL_FLAGS_ESTATE_MASK		(1U << XBL_FLAGS_ESTATE_SHIFT)
#define XBL_FLAGS_ESTATE_A64		0U
#define XBL_FLAGS_ESTATE_A32		1U

#define XBL_FLAGS_ENDIAN_SHIFT		1U
#define XBL_FLAGS_ENDIAN_MASK		(1U << XBL_FLAGS_ENDIAN_SHIFT)
#define XBL_FLAGS_ENDIAN_LE		0U
#define XBL_FLAGS_ENDIAN_BE		1U

#define XBL_FLAGS_TZ_SHIFT		2U
#define XBL_FLAGS_TZ_MASK		(1U << XBL_FLAGS_TZ_SHIFT)
#define XBL_FLAGS_NON_SECURE		0U
#define XBL_FLAGS_SECURE		1U

#define XBL_FLAGS_EL_SHIFT		3U
#define XBL_FLAGS_EL_MASK		(3U << XBL_FLAGS_EL_SHIFT)
#define XBL_FLAGS_EL0			0U
#define XBL_FLAGS_EL1			1U
#define XBL_FLAGS_EL2			2U
#define XBL_FLAGS_EL3			3U

#define XBL_FLAGS_CPU_SHIFT		5U
#define XBL_FLAGS_CPU_MASK		(3U << XBL_FLAGS_CPU_SHIFT)
#define XBL_FLAGS_A53_0			0U
#define XBL_FLAGS_A53_1			1U
#define XBL_FLAGS_A53_2			2U
#define XBL_FLAGS_A53_3			3U

/**
 * get_xbl_cpu() - Get the target CPU for a partition.
 * @partition: Pointer to the partition struct.
 *
 * Return: XBL_FLAGS_A53_0, XBL_FLAGS_A53_1, XBL_FLAGS_A53_2 or XBL_FLAGS_A53_3.
 */
static int32_t get_xbl_cpu(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_CPU_MASK;

	return flags >> XBL_FLAGS_CPU_SHIFT;
}

/**
 * get_xbl_el() - Get the target exception level for a partition.
 * @partition: Pointer to the partition struct.
 *
 * Return: XBL_FLAGS_EL0, XBL_FLAGS_EL1, XBL_FLAGS_EL2 or XBL_FLAGS_EL3.
 */
static int32_t get_xbl_el(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_EL_MASK;

	return flags >> XBL_FLAGS_EL_SHIFT;
}

/**
 * get_xbl_ss() - Get the target security state for a partition.
 * @partition: Pointer to the partition struct.
 *
 * Return: XBL_FLAGS_NON_SECURE or XBL_FLAGS_SECURE.
 */
static int32_t get_xbl_ss(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_TZ_MASK;

	return flags >> XBL_FLAGS_TZ_SHIFT;
}

/**
 * get_xbl_endian() - Get the target endianness for a partition.
 * @partition: Pointer to the partition struct.
 *
 * Return: SPSR_E_LITTLE or SPSR_E_BIG.
 */
static int32_t get_xbl_endian(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_ENDIAN_MASK;

	flags >>= XBL_FLAGS_ENDIAN_SHIFT;

	if (flags == XBL_FLAGS_ENDIAN_BE) {
		return SPSR_E_BIG;
	} else {
		return SPSR_E_LITTLE;
	}
}

/**
 * get_xbl_estate() - Get the target execution state for a partition.
 * @partition: Pointer to the partition struct.
 *
 * Return: XBL_FLAGS_ESTATE_A32 or XBL_FLAGS_ESTATE_A64.
 */
static int32_t get_xbl_estate(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_ESTATE_MASK;

	return flags >> XBL_FLAGS_ESTATE_SHIFT;
}

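/*
 * Handoff structure layout as implied by the accesses below; this is a
 * sketch for the reader, the authoritative definition lives in
 * plat_startup.h:
 *
 *	struct xbl_partition {
 *		uint64_t entry_point;
 *		uint64_t flags;
 *	};
 *
 *	struct xbl_handoff_params {
 *		uint8_t magic[4];
 *		uint32_t num_entries;
 *		struct xbl_partition partition[XBL_MAX_PARTITIONS];
 *	};
 */
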
/**
 * xbl_handover() - Populate the bl32 and bl33 image info structures.
 * @bl32: BL32 image info structure.
 * @bl33: BL33 image info structure.
 * @handoff_addr: TF-A handoff address.
 *
 * Process the handoff parameters from the XBL and populate the BL32 and BL33
 * image info structures accordingly.
 *
 * Return: The status of the handoff; the value is from the xbl_handoff enum.
 */
enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
			      entry_point_info_t *bl33,
			      uint64_t handoff_addr)
{
	const struct xbl_handoff_params *HandoffParams;

	if (!handoff_addr) {
		WARN("BL31: No handoff structure passed\n");
		return XBL_HANDOFF_NO_STRUCT;
	}

	HandoffParams = (struct xbl_handoff_params *)handoff_addr;
	if ((HandoffParams->magic[0] != 'X') ||
	    (HandoffParams->magic[1] != 'L') ||
	    (HandoffParams->magic[2] != 'N') ||
	    (HandoffParams->magic[3] != 'X')) {
		ERROR("BL31: invalid handoff structure at %" PRIx64 "\n", handoff_addr);
		return XBL_HANDOFF_INVAL_STRUCT;
	}

	VERBOSE("BL31: TF-A handoff params at:0x%" PRIx64 ", entries:%u\n",
		handoff_addr, HandoffParams->num_entries);
	if (HandoffParams->num_entries > XBL_MAX_PARTITIONS) {
		ERROR("BL31: TF-A handoff params: too many partitions (%u/%u)\n",
		      HandoffParams->num_entries, XBL_MAX_PARTITIONS);
		return XBL_HANDOFF_TOO_MANY_PARTS;
	}

	/*
	 * We loop over all passed entries but only populate two image
	 * structs (bl32, bl33), i.e. the last applicable images in the
	 * handoff structure are used for the handoff.
	 */
	for (size_t i = 0; i < HandoffParams->num_entries; i++) {
		entry_point_info_t *image;
		int32_t target_estate, target_secure, target_cpu;
		uint32_t target_endianness, target_el;

		VERBOSE("BL31: %zu: entry:0x%" PRIx64 ", flags:0x%" PRIx64 "\n", i,
			HandoffParams->partition[i].entry_point,
			HandoffParams->partition[i].flags);

		target_cpu = get_xbl_cpu(&HandoffParams->partition[i]);
		if (target_cpu != XBL_FLAGS_A53_0) {
			WARN("BL31: invalid target CPU (%i)\n", target_cpu);
			continue;
		}

		target_el = get_xbl_el(&HandoffParams->partition[i]);
		if ((target_el == XBL_FLAGS_EL3) ||
		    (target_el == XBL_FLAGS_EL0)) {
			WARN("BL31: invalid exception level (%u)\n", target_el);
			continue;
		}

		target_secure = get_xbl_ss(&HandoffParams->partition[i]);
		if (target_secure == XBL_FLAGS_SECURE &&
		    target_el == XBL_FLAGS_EL2) {
			WARN("BL31: invalid security state (%i) for exception level (%u)\n",
			     target_secure, target_el);
			continue;
		}

		target_estate = get_xbl_estate(&HandoffParams->partition[i]);
		target_endianness = get_xbl_endian(&HandoffParams->partition[i]);

		if (target_secure == XBL_FLAGS_SECURE) {
			image = bl32;

			if (target_estate == XBL_FLAGS_ESTATE_A32) {
				bl32->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
							 target_endianness,
							 DISABLE_ALL_EXCEPTIONS);
			} else {
				bl32->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
						     DISABLE_ALL_EXCEPTIONS);
			}
		} else {
			image = bl33;

			if (target_estate == XBL_FLAGS_ESTATE_A32) {
				if (target_el == XBL_FLAGS_EL2) {
					target_el = MODE32_hyp;
				} else {
					target_el = MODE32_sys;
				}

				bl33->spsr = SPSR_MODE32(target_el, SPSR_T_ARM,
							 target_endianness,
							 DISABLE_ALL_EXCEPTIONS);
			} else {
				if (target_el == XBL_FLAGS_EL2) {
					target_el = MODE_EL2;
				} else {
					target_el = MODE_EL1;
				}

				bl33->spsr = SPSR_64(target_el, MODE_SP_ELX,
						     DISABLE_ALL_EXCEPTIONS);
			}
		}

		VERBOSE("Setting up %s entry point to:%" PRIx64 ", el:%x\n",
			target_secure == XBL_FLAGS_SECURE ? "BL32" : "BL33",
			HandoffParams->partition[i].entry_point,
			target_el);
		image->pc = HandoffParams->partition[i].entry_point;

		if (target_endianness == SPSR_E_BIG) {
			EP_SET_EE(image->h.attr, EP_EE_BIG);
		} else {
			EP_SET_EE(image->h.attr, EP_EE_LITTLE);
		}
	}

	return XBL_HANDOFF_SUCCESS;
}
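
/*
 * Usage sketch (illustrative, not part of this file): the caller is
 * expected to be the platform's BL31 early setup code, and the helper
 * name bl31_set_default_config() below is an assumption for the sketch:
 *
 *	static entry_point_info_t bl32_image_ep_info;
 *	static entry_point_info_t bl33_image_ep_info;
 *
 *	enum xbl_handoff ret;
 *
 *	ret = xbl_handover(&bl32_image_ep_info, &bl33_image_ep_info,
 *			   handoff_addr);
 *	if ((ret == XBL_HANDOFF_NO_STRUCT) ||
 *	    (ret == XBL_HANDOFF_INVAL_STRUCT)) {
 *		bl31_set_default_config();
 *	} else if (ret != XBL_HANDOFF_SUCCESS) {
 *		panic();
 *	}
 */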