blob: 5e232f90f23eb8b1607ed366706493c9bc37b239 [file] [log] [blame]
Jens Wiklanderc2888862014-08-04 15:39:58 +02001/*
Harrison Mutai54bb51a2025-05-27 10:34:05 +00002 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
Jens Wiklanderc2888862014-08-04 15:39:58 +02003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Jens Wiklanderc2888862014-08-04 15:39:58 +02005 */
6
7
8/*******************************************************************************
9 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
10 * plug-in component to the Secure Monitor, registered as a runtime service. The
11 * SPD is expected to be a functional extension of the Secure Payload (SP) that
12 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
13 * the Trusted OS/Applications range to the dispatcher. The SPD will either
14 * handle the request locally or delegate it to the Secure Payload. It is also
15 * responsible for initialising and maintaining communication with the SP.
16 ******************************************************************************/
Jens Wiklanderc2888862014-08-04 15:39:58 +020017#include <assert.h>
Jens Wiklanderc2888862014-08-04 15:39:58 +020018#include <errno.h>
Jeffrey Kardatzke7e6b09a2022-10-03 15:50:21 -070019#include <inttypes.h>
Jens Wiklanderc2888862014-08-04 15:39:58 +020020#include <stddef.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000021
22#include <arch_helpers.h>
23#include <bl31/bl31.h>
24#include <common/bl_common.h>
25#include <common/debug.h>
26#include <common/runtime_svc.h>
Jeffrey Kardatzke45521892023-02-09 10:45:35 -080027#include <lib/coreboot.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000028#include <lib/el3_runtime/context_mgmt.h>
Jeffrey Kardatzke7e6b09a2022-10-03 15:50:21 -070029#include <lib/optee_utils.h>
Harrison Mutai54bb51a2025-05-27 10:34:05 +000030#if TRANSFER_LIST
Raymond Mao5fe9abb2023-10-04 09:36:21 -070031#include <lib/transfer_list.h>
Harrison Mutai54bb51a2025-05-27 10:34:05 +000032#endif
Jeffrey Kardatzke7e6b09a2022-10-03 15:50:21 -070033#include <lib/xlat_tables/xlat_tables_v2.h>
Jeffrey Kardatzke45521892023-02-09 10:45:35 -080034#if OPTEE_ALLOW_SMC_LOAD
35#include <libfdt.h>
36#endif /* OPTEE_ALLOW_SMC_LOAD */
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000037#include <plat/common/platform.h>
Yi Chou097051f2023-04-11 15:57:08 +080038#include <services/oem/chromeos/widevine_smc_handlers.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000039#include <tools_share/uuid.h>
40
Jens Wiklanderc2888862014-08-04 15:39:58 +020041#include "opteed_private.h"
Jens Wiklanderc2888862014-08-04 15:39:58 +020042#include "teesmc_opteed.h"
Isla Mitchell99305012017-07-11 14:54:08 +010043
#if OPTEE_ALLOW_SMC_LOAD
/*
 * Transfer list used to hand the generated device tree over to OP-TEE when
 * it is loaded via SMC. Only actually referenced when TRANSFER_LIST is also
 * enabled, hence __maybe_unused.
 */
static struct transfer_list_header __maybe_unused *bl31_tl;
#endif

/*******************************************************************************
 * Address of the entrypoint vector table in OPTEE. It is
 * initialised once on the primary core after a cold boot.
 ******************************************************************************/
struct optee_vectors *optee_vector_table;

/*******************************************************************************
 * Array to keep track of per-cpu OPTEE state
 ******************************************************************************/
optee_context_t opteed_sp_context[OPTEED_CORE_COUNT];
/* Execution state of OP-TEE: OPTEE_AARCH64 or OPTEE_AARCH32. */
uint32_t opteed_rw;

#if OPTEE_ALLOW_SMC_LOAD
/* True until the one-shot image-load SMC has been consumed (or refused). */
static bool opteed_allow_load;
/* OP-TEE image loading service UUID */
DEFINE_SVC_UUID2(optee_image_load_uuid,
	0xb1eafba3, 0x5d31, 0x4612, 0xb9, 0x06,
	0xc4, 0xc7, 0xa4, 0xbe, 0x3c, 0xc0);

/* Size of the device tree blob that is handed to OP-TEE at load time. */
#define OPTEED_FDT_SIZE 1024
static uint8_t fdt_buf[OPTEED_FDT_SIZE] __aligned(CACHE_WRITEBACK_GRANULE);

#else
static int32_t opteed_init(void);
#endif
73
/* Combine two 32-bit register values into one 64-bit value (high:low). */
uint64_t dual32to64(uint32_t high, uint32_t low)
{
	uint64_t combined = (uint64_t)high;

	combined <<= 32;
	combined |= (uint64_t)low;

	return combined;
}
Jens Wiklanderc2888862014-08-04 15:39:58 +020078
/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the
 * OPTEED. It validates the interrupt and upon success arranges entry into
 * the OPTEE at 'optee_fiq_entry()' for handling the interrupt. It returns
 * via SMC_RET1 into the secure context, passing the preempted non-secure
 * ELR for debugging purposes.
 ******************************************************************************/
static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	optee_context_t *optee_ctx;

#if OPTEE_ALLOW_SMC_LOAD
	if (optee_vector_table == NULL) {
		/* OPTEE is not loaded yet, ignore this interrupt */
		SMC_RET0(handle);
	}
#endif

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the OPTEE */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's OPTEE context */
	linear_id = plat_my_core_pos();
	optee_ctx = &opteed_sp_context[linear_id];
	assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

	/* Arrange for S-EL1 entry at OP-TEE's FIQ vector. */
	cm_set_elr_el3(SECURE, (uint64_t)&optee_vector_table->fiq_entry);
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the OPTEE that it has to handle an FIQ (synchronously).
	 * Also the instruction in normal world where the interrupt was
	 * generated is passed for debugging purposes. It is safe to
	 * retrieve this address from ELR_EL3 as the secure context will
	 * not take effect until el3_exit().
	 */
	SMC_RET1(&optee_ctx->cpu_ctx, read_elr_el3());
}
126
Jeffrey Kardatzke916eb9e2024-04-17 10:38:17 -0700127/*
128 * Registers an interrupt handler for S-EL1 interrupts when generated during
129 * code executing in the non-secure state. Panics if it fails to do so.
130 */
131static void register_opteed_interrupt_handler(void)
132{
133 u_register_t flags;
134 uint64_t rc;
135
136 flags = 0;
137 set_interrupt_rm_flag(flags, NON_SECURE);
138 rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
139 opteed_sel1_interrupt_handler,
140 flags);
141 if (rc)
142 panic();
143}
144
/*******************************************************************************
 * OPTEE Dispatcher setup. The OPTEED finds out the OPTEE entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into OPTEE for its initialization. Returns 0 on success, non-zero when
 * OP-TEE is absent or its handoff arguments are invalid.
 ******************************************************************************/
static int32_t opteed_setup(void)
{
#if OPTEE_ALLOW_SMC_LOAD
	/* OP-TEE arrives later via the image-load SMC; nothing to parse now. */
	opteed_allow_load = true;
	INFO("Delaying OP-TEE setup until we receive an SMC call to load it\n");
	/*
	 * We must register the interrupt handler now so that the interrupt
	 * priorities are not changed after starting the linux kernel.
	 */
	register_opteed_interrupt_handler();
	return 0;
#else
	entry_point_info_t *optee_ep_info;
	uint32_t linear_id;
	uint64_t arg0;
	uint64_t arg1;
	uint64_t arg2;
	uint64_t arg3;
	struct transfer_list_header __maybe_unused *tl = NULL;
	struct transfer_list_entry __maybe_unused *te = NULL;
	void __maybe_unused *dt = NULL;

	linear_id = plat_my_core_pos();

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service
	 */
	optee_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!optee_ep_info) {
		WARN("No OPTEE provided by BL2 boot loader, Booting device"
			" without OPTEE initialization. SMC`s destined for OPTEE"
			" will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for SP, we return a non-zero value
	 * signalling failure initializing the service. We bail out without
	 * registering any handlers
	 */
	if (!optee_ep_info->pc)
		return 1;

#if TRANSFER_LIST
	/* arg3 carries the transfer list address under the handoff ABI. */
	tl = (void *)optee_ep_info->args.arg3;

	if (transfer_list_check_header(tl)) {
		/* Hand the FDT entry from the transfer list to OP-TEE. */
		te = transfer_list_find(tl, TL_TAG_FDT);
		dt = transfer_list_entry_data(te);

		opteed_rw = GET_RW(optee_ep_info->spsr);
		if (opteed_rw == OPTEE_AARCH64) {
			/* Reject a mismatched register-convention version. */
			if (optee_ep_info->args.arg1 !=
			    TRANSFER_LIST_HANDOFF_X1_VALUE(
				    REGISTER_CONVENTION_VERSION))
				return 1;

			arg0 = (uint64_t)dt;
			arg2 = 0;
		} else {
			if (optee_ep_info->args.arg1 !=
			    TRANSFER_LIST_HANDOFF_R1_VALUE(
				    REGISTER_CONVENTION_VERSION))
				return 1;

			arg0 = 0;
			arg2 = (uint64_t)dt;
		}

		/* Pass the signature (arg1) and transfer list (arg3) on. */
		arg1 = optee_ep_info->args.arg1;
		arg3 = optee_ep_info->args.arg3;

	} else
#endif /* TRANSFER_LIST */
	{
		/* Default handoff arguments */
		opteed_rw = optee_ep_info->args.arg0;
		arg0 = optee_ep_info->args.arg1; /* opteed_pageable_part */
		arg1 = optee_ep_info->args.arg2; /* opteed_mem_limit */
		arg2 = optee_ep_info->args.arg3; /* dt_addr */
		arg3 = 0;
	}

	opteed_init_optee_ep_state(optee_ep_info, opteed_rw,
				   optee_ep_info->pc, arg0, arg1, arg2,
				   arg3, &opteed_sp_context[linear_id]);

	/*
	 * All OPTEED initialization done. Now register our init function with
	 * BL31 for deferred invocation
	 */
	bl31_register_bl32_init(&opteed_init);

	return 0;
#endif /* OPTEE_ALLOW_SMC_LOAD */
}
248
249/*******************************************************************************
250 * This function passes control to the OPTEE image (BL32) for the first time
251 * on the primary cpu after a cold boot. It assumes that a valid secure
252 * context has already been created by opteed_setup() which can be directly
253 * used. It also assumes that a valid non-secure context has been
254 * initialised by PSCI so it does not need to save and restore any
255 * non-secure state. This function performs a synchronous entry into
Jeffrey Kardatzkeab7e5572023-02-09 11:03:17 -0800256 * OPTEE. OPTEE passes control back to this routine through a SMC. This returns
257 * a non-zero value on success and zero on failure.
Jens Wiklanderc2888862014-08-04 15:39:58 +0200258 ******************************************************************************/
Jeffrey Kardatzke7e6b09a2022-10-03 15:50:21 -0700259static int32_t
260opteed_init_with_entry_point(entry_point_info_t *optee_entry_point)
Jens Wiklanderc2888862014-08-04 15:39:58 +0200261{
Soby Mathewda43b662015-07-08 21:45:46 +0100262 uint32_t linear_id = plat_my_core_pos();
Jens Wiklanderc2888862014-08-04 15:39:58 +0200263 optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
Jens Wiklanderc2888862014-08-04 15:39:58 +0200264 uint64_t rc;
Jens Wiklanderc2888862014-08-04 15:39:58 +0200265 assert(optee_entry_point);
266
Soby Mathewda43b662015-07-08 21:45:46 +0100267 cm_init_my_context(optee_entry_point);
Jens Wiklanderc2888862014-08-04 15:39:58 +0200268
269 /*
270 * Arrange for an entry into OPTEE. It will be returned via
271 * OPTEE_ENTRY_DONE case
272 */
273 rc = opteed_synchronous_sp_entry(optee_ctx);
274 assert(rc != 0);
275
276 return rc;
277}
278
Jeffrey Kardatzke7e6b09a2022-10-03 15:50:21 -0700279#if !OPTEE_ALLOW_SMC_LOAD
280static int32_t opteed_init(void)
281{
282 entry_point_info_t *optee_entry_point;
283 /*
284 * Get information about the OP-TEE (BL32) image. Its
285 * absence is a critical failure.
286 */
287 optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
288 return opteed_init_with_entry_point(optee_entry_point);
289}
290#endif /* !OPTEE_ALLOW_SMC_LOAD */
Jens Wiklanderc2888862014-08-04 15:39:58 +0200291
Jeffrey Kardatzke7e6b09a2022-10-03 15:50:21 -0700292#if OPTEE_ALLOW_SMC_LOAD
#if COREBOOT
/*
 * Adds a firmware/coreboot node with the coreboot table information to a device
 * tree. Returns zero on success or if there is no coreboot table information;
 * failure code otherwise.
 *
 * The caller (create_opteed_dt()) owns the sequential fdt writer state; on a
 * non-zero return the whole device tree creation is abandoned, so unbalanced
 * fdt_begin_node() calls on error paths are harmless.
 */
static int add_coreboot_node(void *fdt)
{
	int ret;
	uint64_t coreboot_table_addr;
	uint32_t coreboot_table_size;
	/* "reg" property payload: 64-bit address followed by 32-bit size. */
	struct {
		uint64_t addr;
		uint32_t size;
	} reg_node;

	coreboot_get_table_location(&coreboot_table_addr, &coreboot_table_size);
	if (!coreboot_table_addr || !coreboot_table_size) {
		/*
		 * Not fatal: boot simply continues without a coreboot node.
		 * Fix: TF-A log macros do not append a newline, so the
		 * message must carry its own '\n' to terminate the log line.
		 */
		WARN("Unable to get coreboot table location for device tree\n");
		return 0;
	}
	ret = fdt_begin_node(fdt, "firmware");
	if (ret)
		return ret;

	ret = fdt_property(fdt, "ranges", NULL, 0);
	if (ret)
		return ret;

	ret = fdt_begin_node(fdt, "coreboot");
	if (ret)
		return ret;

	ret = fdt_property_string(fdt, "compatible", "coreboot");
	if (ret)
		return ret;

	/* Values are stored big-endian in the FDT. */
	reg_node.addr = cpu_to_fdt64(coreboot_table_addr);
	reg_node.size = cpu_to_fdt32(coreboot_table_size);
	ret = fdt_property(fdt, "reg", &reg_node,
				sizeof(uint64_t) + sizeof(uint32_t));
	if (ret)
		return ret;

	/* Close "coreboot" ... */
	ret = fdt_end_node(fdt);
	if (ret)
		return ret;

	/* ... then close "firmware". */
	return fdt_end_node(fdt);
}
#endif /* COREBOOT */
343
#if CROS_WIDEVINE_SMC
/*
 * Adds a options/widevine node with the widevine table information to a device
 * tree. Returns zero on success or if there is no widevine table information;
 * failure code otherwise.
 *
 * The secrets come from the cros_oem_* tables populated by the ChromeOS
 * widevine SMC handlers; each property is only emitted when the
 * corresponding entry has a non-zero length.
 */
static int add_options_widevine_node(void *fdt)
{
	int ret;

	/* Nodes are nested as /options/op-tee/widevine. */
	ret = fdt_begin_node(fdt, "options");
	if (ret)
		return ret;

	ret = fdt_begin_node(fdt, "op-tee");
	if (ret)
		return ret;

	ret = fdt_begin_node(fdt, "widevine");
	if (ret)
		return ret;

	if (cros_oem_tpm_auth_pk.length) {
		ret = fdt_property(fdt, "tcg,tpm-auth-public-key",
				   cros_oem_tpm_auth_pk.buffer,
				   cros_oem_tpm_auth_pk.length);
		if (ret)
			return ret;
	}

	if (cros_oem_huk.length) {
		ret = fdt_property(fdt, "op-tee,hardware-unique-key",
				   cros_oem_huk.buffer, cros_oem_huk.length);
		if (ret)
			return ret;
	}

	if (cros_oem_rot.length) {
		ret = fdt_property(fdt, "google,widevine-root-of-trust-ecc-p256",
				   cros_oem_rot.buffer, cros_oem_rot.length);
		if (ret)
			return ret;
	}

	/* Close "widevine" ... */
	ret = fdt_end_node(fdt);
	if (ret)
		return ret;

	/* ... then "op-tee" ... */
	ret = fdt_end_node(fdt);
	if (ret)
		return ret;

	/* ... then "options". */
	return fdt_end_node(fdt);
}
#endif /* CROS_WIDEVINE_SMC */
399
/*
 * Creates a device tree for passing into OP-TEE. Currently is populated with
 * the coreboot table address.
 * Returns 0 on success, error code otherwise.
 *
 * Builds the tree in-place in the static fdt_buf using the libfdt
 * sequential-write API (fdt_create()/fdt_finish()).
 */
static int create_opteed_dt(void)
{
	int ret;

	ret = fdt_create(fdt_buf, OPTEED_FDT_SIZE);
	if (ret)
		return ret;

	/* No memory reservations are needed; close the reservemap. */
	ret = fdt_finish_reservemap(fdt_buf);
	if (ret)
		return ret;

	/* Open the root node. */
	ret = fdt_begin_node(fdt_buf, "");
	if (ret)
		return ret;

#if COREBOOT
	ret = add_coreboot_node(fdt_buf);
	if (ret)
		return ret;
#endif /* COREBOOT */

#if CROS_WIDEVINE_SMC
	ret = add_options_widevine_node(fdt_buf);
	if (ret)
		return ret;
#endif /* CROS_WIDEVINE_SMC */

	/* Close the root node and finalize the blob. */
	ret = fdt_end_node(fdt_buf);
	if (ret)
		return ret;

	return fdt_finish(fdt_buf);
}
439
#if TRANSFER_LIST
/*
 * Initializes the BL31 transfer list at the platform handoff region and adds
 * the given device tree blob to it as a TL_TAG_FDT entry.
 * Returns 0 on success, -1 on failure.
 */
static int32_t create_smc_tl(const void *fdt, uint32_t fdt_sz)
{
	bl31_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				FW_HANDOFF_SIZE);
	if (!bl31_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
		return -1;
	}

	if (!transfer_list_add(bl31_tl, TL_TAG_FDT, fdt_sz, fdt)) {
		return -1;
	}
	return 0;
}
#endif
Raymond Mao5fe9abb2023-10-04 09:36:21 -0700457
/*******************************************************************************
 * This function is responsible for handling the SMC that loads the OP-TEE
 * binary image via a non-secure SMC call. It takes the size and physical
 * address of the payload as parameters. Returns 0 on success, a negative
 * errno-style value or mmap error code otherwise.
 ******************************************************************************/
static int32_t opteed_handle_smc_load(uint64_t data_size, uint32_t data_pa)
{
	uintptr_t data_va = data_pa;
	uint64_t mapped_data_pa;
	uintptr_t mapped_data_va;
	uint64_t data_map_size;
	int32_t rc;
	optee_header_t *image_header;
	uint8_t *image_ptr;
	uint64_t target_pa;
	uint64_t target_end_pa;
	uint64_t image_pa;
	uintptr_t image_va;
	optee_image_t *curr_image;
	uintptr_t target_va;
	uint64_t target_size;
	entry_point_info_t optee_ep_info;
	uint32_t linear_id = plat_my_core_pos();
	uint64_t dt_addr = 0;
	uint64_t arg0 = 0;
	uint64_t arg1 = 0;
	uint64_t arg2 = 0;
	uint64_t arg3 = 0;

	/* Map the payload page-aligned, read-only, as non-secure memory. */
	mapped_data_pa = page_align(data_pa, DOWN);
	mapped_data_va = mapped_data_pa;
	data_map_size = page_align(data_size + (mapped_data_pa - data_pa), UP);

	/*
	 * We do not validate the passed in address because we are trusting the
	 * non-secure world at this point still.
	 */
	rc = mmap_add_dynamic_region(mapped_data_pa, mapped_data_va,
				     data_map_size, MT_MEMORY | MT_RO | MT_NS);
	if (rc != 0) {
		return rc;
	}

	/* Only v2 headers carrying exactly one image are accepted. */
	image_header = (optee_header_t *)data_va;
	if (image_header->magic != TEE_MAGIC_NUM_OPTEE ||
	    image_header->version != 2 || image_header->nb_images != 1) {
		mmap_remove_dynamic_region(mapped_data_va, data_map_size);
		return -EINVAL;
	}

	/* Image payload follows the header and its single image descriptor. */
	image_ptr = (uint8_t *)data_va + sizeof(optee_header_t) +
			sizeof(optee_image_t);
	if (image_header->arch == 1) {
		opteed_rw = OPTEE_AARCH64;
	} else {
		opteed_rw = OPTEE_AARCH32;
	}

	curr_image = &image_header->optee_image_list[0];
	image_pa = dual32to64(curr_image->load_addr_hi,
			      curr_image->load_addr_lo);
	image_va = image_pa;
	target_end_pa = image_pa + curr_image->size;

	/* Now also map the memory we want to copy it to. */
	target_pa = page_align(image_pa, DOWN);
	target_va = target_pa;
	target_size = page_align(target_end_pa, UP) - target_pa;

	rc = mmap_add_dynamic_region(target_pa, target_va, target_size,
				     MT_MEMORY | MT_RW | MT_SECURE);
	if (rc != 0) {
		mmap_remove_dynamic_region(mapped_data_va, data_map_size);
		return rc;
	}

	INFO("Loaded OP-TEE via SMC: size %d addr 0x%" PRIx64 "\n",
	     curr_image->size, image_va);

	/* Copy the image to its load address, then drop both mappings. */
	memcpy((void *)image_va, image_ptr, curr_image->size);
	flush_dcache_range(target_pa, target_size);

	mmap_remove_dynamic_region(mapped_data_va, data_map_size);
	mmap_remove_dynamic_region(target_va, target_size);

	/* Save the non-secure state */
	cm_el1_sysregs_context_save(NON_SECURE);

	rc = create_opteed_dt();
	if (rc) {
		ERROR("Failed device tree creation %d\n", rc);
		return rc;
	}
	dt_addr = (uint64_t)fdt_buf;
	flush_dcache_range(dt_addr, OPTEED_FDT_SIZE);

#if TRANSFER_LIST
	/* Prefer the transfer-list handoff; fall back to legacy args below. */
	if (!create_smc_tl((void *)dt_addr, OPTEED_FDT_SIZE)) {
		struct transfer_list_entry *te = NULL;
		void *dt = NULL;

		te = transfer_list_find(bl31_tl, TL_TAG_FDT);
		dt = transfer_list_entry_data(te);

		if (opteed_rw == OPTEE_AARCH64) {
			arg0 = (uint64_t)dt;
			arg1 = TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
			arg2 = 0;
		} else {
			arg0 = 0;
			arg1 = TRANSFER_LIST_HANDOFF_R1_VALUE(REGISTER_CONVENTION_VERSION);
			arg2 = (uint64_t)dt;
		}

		arg3 = (uint64_t)bl31_tl;
	} else
#endif /* TRANSFER_LIST */
	{
		/* Default handoff arguments */
		arg2 = dt_addr;
	}

	opteed_init_optee_ep_state(&optee_ep_info,
				   opteed_rw,
				   image_pa,
				   arg0,
				   arg1,
				   arg2,
				   arg3,
				   &opteed_sp_context[linear_id]);
	/* opteed_init_with_entry_point() returns 0 on failure. */
	if (opteed_init_with_entry_point(&optee_ep_info) == 0) {
		rc = -EFAULT;
	}

	/* Restore non-secure state */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	return rc;
}
598#endif /* OPTEE_ALLOW_SMC_LOAD */
599
/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure
 * payload to delegate work and return results back to the non-secure
 * state. Lastly it will also return any information that OPTEE needs to do
 * the work assigned to it.
 ******************************************************************************/
static uintptr_t opteed_smc_handler(uint32_t smc_fid,
			 u_register_t x1,
			 u_register_t x2,
			 u_register_t x3,
			 u_register_t x4,
			 void *cookie,
			 void *handle,
			 u_register_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos();
	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];

	/*
	 * Determine which security state this SMC originated from
	 */

	if (is_caller_non_secure(flags)) {
#if OPTEE_ALLOW_SMC_LOAD
		if (opteed_allow_load && smc_fid == NSSMC_OPTEED_CALL_UID) {
			/* Provide the UUID of the image loading service. */
			SMC_UUID_RET(handle, optee_image_load_uuid);
		}
		if (smc_fid == NSSMC_OPTEED_CALL_LOAD_IMAGE) {
			/*
			 * TODO: Consider wiping the code for SMC loading from
			 * memory after it has been invoked similar to what is
			 * done under RECLAIM_INIT, but extended to happen
			 * later.
			 */
			if (!opteed_allow_load) {
				SMC_RET1(handle, -EPERM);
			}

			/* One-shot: refuse any further load attempts. */
			opteed_allow_load = false;
			uint64_t data_size = dual32to64(x1, x2);
			uint64_t data_pa = dual32to64(x3, x4);
			if (!data_size || !data_pa) {
				/*
				 * This is invoked when the OP-TEE image didn't
				 * load correctly in the kernel but we want to
				 * block off loading of it later for security
				 * reasons.
				 */
				SMC_RET1(handle, -EINVAL);
			}
			SMC_RET1(handle, opteed_handle_smc_load(
					 data_size, data_pa));
		}
#endif /* OPTEE_ALLOW_SMC_LOAD */
		/*
		 * This is a fresh request from the non-secure client.
		 * The parameters are in x1 and x2. Figure out which
		 * registers need to be preserved, save the non-secure
		 * state and send the request to the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * OP-TEE to do the work now. If we are loading via an SMC,
		 * then we also need to init this CPU context if not done
		 * already.
		 */
		if (optee_vector_table == NULL) {
			SMC_RET1(handle, -EINVAL);
		}

		if (get_optee_pstate(optee_ctx->state) ==
		    OPTEE_PSTATE_UNKNOWN) {
			opteed_cpu_on_finish_handler(0);
		}

		/*
		 * Verify if there is a valid context to use, copy the
		 * operation type and parameters to the secure context
		 * and jump to the fast smc entry point in the secure
		 * payload. Entry into S-EL1 will take place upon exit
		 * from this function.
		 */
		assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

		/* Set appropriate entry for SMC.
		 * We expect OPTEE to manage the PSTATE.I and PSTATE.F
		 * flags as appropriate.
		 */
		if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
			cm_set_elr_el3(SECURE, (uint64_t)
					&optee_vector_table->fast_smc_entry);
		} else {
			cm_set_elr_el3(SECURE, (uint64_t)
					&optee_vector_table->yield_smc_entry);
		}

		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);

		/* Forward x4-x6 from the non-secure to the secure context. */
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X4,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X4));
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X5,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X5));
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X6,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X6));
		/* Propagate hypervisor client ID */
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X7,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X7));

		SMC_RET4(&optee_ctx->cpu_ctx, smc_fid, x1, x2, x3);
	}

	/*
	 * Returning from OPTEE
	 */

	switch (smc_fid) {
	/*
	 * OPTEE has finished initialising itself after a cold boot
	 */
	case TEESMC_OPTEED_RETURN_ENTRY_DONE:
		/*
		 * Stash the OPTEE entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(optee_vector_table == NULL);
		optee_vector_table = (optee_vectors_t *) x1;

		if (optee_vector_table) {
			set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_ON);

			/*
			 * OPTEE has been successfully initialized.
			 * Register power management hooks with PSCI
			 */
			psci_register_spd_pm_hook(&opteed_pm);

#if !OPTEE_ALLOW_SMC_LOAD
			/*
			 * With SMC loading the handler was already registered
			 * in opteed_setup().
			 */
			register_opteed_interrupt_handler();
#endif
		}

		/*
		 * OPTEE reports completion. The OPTEED must have initiated
		 * the original request through a synchronous entry into
		 * OPTEE. Jump back to the original C runtime context.
		 */
		opteed_synchronous_sp_exit(optee_ctx, x1);
		break;


	/*
	 * These function IDs is used only by OP-TEE to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TEESMC_OPTEED_RETURN_ON_DONE:
	case TEESMC_OPTEED_RETURN_RESUME_DONE:


	/*
	 * These function IDs is used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TEESMC_OPTEED_RETURN_OFF_DONE:
	case TEESMC_OPTEED_RETURN_SUSPEND_DONE:
	case TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE:
	case TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE:

		/*
		 * OPTEE reports completion. The OPTEED must have initiated the
		 * original request through a synchronous entry into OPTEE.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		opteed_synchronous_sp_exit(optee_ctx, x1);
		break;

	/*
	 * OPTEE is returning from a call or being preempted from a call, in
	 * either case execution should resume in the normal world.
	 */
	case TEESMC_OPTEED_RETURN_CALL_DONE:
		/*
		 * This is the result from the secure client of an
		 * earlier request. The results are in x0-x3. Copy it
		 * into the non-secure context, save the secure state
		 * and return to the non-secure state.
		 */
		assert(handle == cm_get_context(SECURE));
		cm_el1_sysregs_context_save(SECURE);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/* Restore non-secure state */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET4(ns_cpu_context, x1, x2, x3, x4);

	/*
	 * OPTEE has finished handling a S-EL1 FIQ interrupt. Execution
	 * should resume in the normal world.
	 */
	case TEESMC_OPTEED_RETURN_FIQ_DONE:
		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since OPTEE was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET0((uint64_t) ns_cpu_context);

	default:
		panic();
	}
}
848
/* Define an OPTEED runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	opteed_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	opteed_setup,		/* Registered as the one-time init hook. */
	opteed_smc_handler
);

/* Define an OPTEED runtime service descriptor for yielding SMC calls */
DECLARE_RT_SVC(
	opteed_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
	NULL,			/* Setup already done by the fast descriptor. */
	opteed_smc_handler
);