/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */


/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/coreboot.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/optee_utils.h>
#include <lib/transfer_list.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#if OPTEE_ALLOW_SMC_LOAD
#include <libfdt.h>
#endif /* OPTEE_ALLOW_SMC_LOAD */
#include <plat/common/platform.h>
#include <tools_share/uuid.h>

#include "opteed_private.h"
#include "teesmc_opteed.h"

#if OPTEE_ALLOW_SMC_LOAD
static struct transfer_list_header *bl31_tl;
#endif

/*******************************************************************************
 * Address of the entrypoint vector table in OPTEE. It is
 * initialised once on the primary core after a cold boot.
 ******************************************************************************/
struct optee_vectors *optee_vector_table;

/*******************************************************************************
 * Array to keep track of per-cpu OPTEE state
 ******************************************************************************/
optee_context_t opteed_sp_context[OPTEED_CORE_COUNT];
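/* Execution state of the OP-TEE image: OPTEE_AARCH32 or OPTEE_AARCH64. */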
uint32_t opteed_rw;

#if OPTEE_ALLOW_SMC_LOAD
static bool opteed_allow_load;
/* OP-TEE image loading service UUID */
DEFINE_SVC_UUID2(optee_image_load_uuid,
	0xb1eafba3, 0x5d31, 0x4612, 0xb9, 0x06,
	0xc4, 0xc7, 0xa4, 0xbe, 0x3c, 0xc0);

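/*
 * Buffer for the small device tree built by create_opteed_dt() and handed to
 * OP-TEE when it is loaded via the SMC path.
 */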
#define OPTEED_FDT_SIZE 256
static uint8_t fdt_buf[OPTEED_FDT_SIZE] __aligned(CACHE_WRITEBACK_GRANULE);

#else
static int32_t opteed_init(void);
#endif

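/* Combine the high and low 32-bit halves of an SMC argument into one 64-bit value. */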
uint64_t dual32to64(uint32_t high, uint32_t low)
{
	return ((uint64_t)high << 32) | low;
}

/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the
 * OPTEED. It validates the interrupt and upon success arranges entry into
 * the OPTEE at 'optee_fiq_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	optee_context_t *optee_ctx;

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the OPTEE */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's OPTEE context */
	linear_id = plat_my_core_pos();
	optee_ctx = &opteed_sp_context[linear_id];
	assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

	cm_set_elr_el3(SECURE, (uint64_t)&optee_vector_table->fiq_entry);
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the OPTEE that it has to handle an FIQ (synchronously).
	 * Also the instruction in normal world where the interrupt was
	 * generated is passed for debugging purposes. It is safe to
	 * retrieve this address from ELR_EL3 as the secure context will
	 * not take effect until el3_exit().
	 */
	SMC_RET1(&optee_ctx->cpu_ctx, read_elr_el3());
}

/*******************************************************************************
 * OPTEE Dispatcher setup. The OPTEED finds out the OPTEE entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into OPTEE for its initialization.
 ******************************************************************************/
static int32_t opteed_setup(void)
{
#if OPTEE_ALLOW_SMC_LOAD
	opteed_allow_load = true;
	INFO("Delaying OP-TEE setup until we receive an SMC call to load it\n");
	return 0;
#else
	entry_point_info_t *optee_ep_info;
	uint32_t linear_id;
	uint64_t arg0;
	uint64_t arg1;
	uint64_t arg2;
	uint64_t arg3;
	struct transfer_list_header *tl = NULL;
	struct transfer_list_entry *te = NULL;
	void *dt = NULL;

	linear_id = plat_my_core_pos();

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service
	 */
	optee_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!optee_ep_info) {
148 WARN("No OPTEE provided by BL2 boot loader, Booting device"
149 " without OPTEE initialization. SMC`s destined for OPTEE"
150 " will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for SP, we return a non-zero value
	 * signalling failure initializing the service. We bail out without
	 * registering any handlers
	 */
	if (!optee_ep_info->pc)
		return 1;

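	/*
	 * When BL2 hands over via a transfer list, arg1 carries the transfer
	 * list signature and register convention version, and arg3 points to
	 * the list itself; the device tree is then located via its TL_TAG_FDT
	 * entry rather than being passed directly.
	 */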
	if (TRANSFER_LIST &&
	    optee_ep_info->args.arg1 == (TRANSFER_LIST_SIGNATURE |
					 REGISTER_CONVENTION_VERSION_MASK)) {
		tl = (void *)optee_ep_info->args.arg3;
		if (transfer_list_check_header(tl) == TL_OPS_NON) {
			return 1;
		}

		opteed_rw = GET_RW(optee_ep_info->spsr);
		te = transfer_list_find(tl, TL_TAG_FDT);
		dt = transfer_list_entry_data(te);

		if (opteed_rw == OPTEE_AARCH64) {
			arg0 = (uint64_t)dt;
			arg2 = 0;
		} else {
			arg2 = (uint64_t)dt;
			arg0 = 0;
		}

		arg1 = optee_ep_info->args.arg1;
		arg3 = optee_ep_info->args.arg3;
	} else {
		/* Default handoff arguments */
		opteed_rw = optee_ep_info->args.arg0;
		arg0 = optee_ep_info->args.arg1; /* opteed_pageable_part */
		arg1 = optee_ep_info->args.arg2; /* opteed_mem_limit */
		arg2 = optee_ep_info->args.arg3; /* dt_addr */
		arg3 = 0;
	}

	opteed_init_optee_ep_state(optee_ep_info, opteed_rw, optee_ep_info->pc,
				   arg0, arg1, arg2, arg3,
				   &opteed_sp_context[linear_id]);

	/*
	 * All OPTEED initialization done. Now register our init function with
	 * BL31 for deferred invocation
	 */
	bl31_register_bl32_init(&opteed_init);

	return 0;
#endif /* OPTEE_ALLOW_SMC_LOAD */
}

/*******************************************************************************
 * This function passes control to the OPTEE image (BL32) for the first time
 * on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by opteed_setup() which can be directly
 * used. It also assumes that a valid non-secure context has been
 * initialised by PSCI so it does not need to save and restore any
 * non-secure state. This function performs a synchronous entry into
 * OPTEE. OPTEE passes control back to this routine through an SMC. This returns
 * a non-zero value on success and zero on failure.
 ******************************************************************************/
static int32_t
opteed_init_with_entry_point(entry_point_info_t *optee_entry_point)
{
	uint32_t linear_id = plat_my_core_pos();
	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
	uint64_t rc;
	assert(optee_entry_point);

	cm_init_my_context(optee_entry_point);

	/*
	 * Arrange for an entry into OPTEE. It will be returned via
	 * OPTEE_ENTRY_DONE case
	 */
	rc = opteed_synchronous_sp_entry(optee_ctx);
	assert(rc != 0);

	return rc;
}

#if !OPTEE_ALLOW_SMC_LOAD
static int32_t opteed_init(void)
{
	entry_point_info_t *optee_entry_point;
	/*
	 * Get information about the OP-TEE (BL32) image. Its
	 * absence is a critical failure.
	 */
	optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	return opteed_init_with_entry_point(optee_entry_point);
}
#endif /* !OPTEE_ALLOW_SMC_LOAD */

#if OPTEE_ALLOW_SMC_LOAD
#if COREBOOT
/*
 * Adds a firmware/coreboot node with the coreboot table information to a device
 * tree. Returns zero on success or if there is no coreboot table information;
 * failure code otherwise.
 */
static int add_coreboot_node(void *fdt)
{
	int ret;
	uint64_t coreboot_table_addr;
	uint32_t coreboot_table_size;
	struct {
		uint64_t addr;
		uint32_t size;
	} reg_node;
	coreboot_get_table_location(&coreboot_table_addr, &coreboot_table_size);
	if (!coreboot_table_addr || !coreboot_table_size) {
		WARN("Unable to get coreboot table location for device tree");
		return 0;
	}
	ret = fdt_begin_node(fdt, "firmware");
	if (ret)
		return ret;

	ret = fdt_property(fdt, "ranges", NULL, 0);
	if (ret)
		return ret;

	ret = fdt_begin_node(fdt, "coreboot");
	if (ret)
		return ret;

	ret = fdt_property_string(fdt, "compatible", "coreboot");
	if (ret)
		return ret;

	reg_node.addr = cpu_to_fdt64(coreboot_table_addr);
	reg_node.size = cpu_to_fdt32(coreboot_table_size);
	ret = fdt_property(fdt, "reg", &reg_node,
			   sizeof(uint64_t) + sizeof(uint32_t));
	if (ret)
		return ret;

	ret = fdt_end_node(fdt);
	if (ret)
		return ret;

	return fdt_end_node(fdt);
}
#endif /* COREBOOT */

/*
 * Creates a device tree for passing into OP-TEE. It is currently populated
 * with the coreboot table address.
 * Returns 0 on success, error code otherwise.
 */
static int create_opteed_dt(void)
{
	int ret;

	ret = fdt_create(fdt_buf, OPTEED_FDT_SIZE);
	if (ret)
		return ret;

	ret = fdt_finish_reservemap(fdt_buf);
	if (ret)
		return ret;

	ret = fdt_begin_node(fdt_buf, "");
	if (ret)
		return ret;

#if COREBOOT
	ret = add_coreboot_node(fdt_buf);
	if (ret)
		return ret;
#endif /* COREBOOT */

	ret = fdt_end_node(fdt_buf);
	if (ret)
		return ret;

	return fdt_finish(fdt_buf);
}

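/*
 * Initialise the BL31 transfer list at FW_HANDOFF_BASE and add the generated
 * device tree to it. Returns 0 on success, -1 on failure or when
 * TRANSFER_LIST support is not enabled.
 */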
static int32_t create_smc_tl(const void *fdt, uint32_t fdt_sz)
{
#if TRANSFER_LIST
	bl31_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				     FW_HANDOFF_SIZE);
	if (!bl31_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
		return -1;
	}

	if (!transfer_list_add(bl31_tl, TL_TAG_FDT, fdt_sz, fdt)) {
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}

/*******************************************************************************
 * This function is responsible for handling the SMC that loads the OP-TEE
 * binary image via a non-secure SMC call. It takes the size and physical
 * address of the payload as parameters.
 ******************************************************************************/
static int32_t opteed_handle_smc_load(uint64_t data_size, uint32_t data_pa)
{
	uintptr_t data_va = data_pa;
	uint64_t mapped_data_pa;
	uintptr_t mapped_data_va;
	uint64_t data_map_size;
	int32_t rc;
	optee_header_t *image_header;
	uint8_t *image_ptr;
	uint64_t target_pa;
	uint64_t target_end_pa;
	uint64_t image_pa;
	uintptr_t image_va;
	optee_image_t *curr_image;
	uintptr_t target_va;
	uint64_t target_size;
	entry_point_info_t optee_ep_info;
	uint32_t linear_id = plat_my_core_pos();
	uint64_t dt_addr = 0;
	uint64_t arg0 = 0;
	uint64_t arg1 = 0;
	uint64_t arg2 = 0;
	uint64_t arg3 = 0;

	mapped_data_pa = page_align(data_pa, DOWN);
	mapped_data_va = mapped_data_pa;
	data_map_size = page_align(data_size + (mapped_data_pa - data_pa), UP);

	/*
	 * We do not validate the passed-in address because at this point we
	 * still trust the non-secure world.
	 */
	rc = mmap_add_dynamic_region(mapped_data_pa, mapped_data_va,
				     data_map_size, MT_MEMORY | MT_RO | MT_NS);
	if (rc != 0) {
		return rc;
	}

	image_header = (optee_header_t *)data_va;
	if (image_header->magic != TEE_MAGIC_NUM_OPTEE ||
	    image_header->version != 2 || image_header->nb_images != 1) {
		mmap_remove_dynamic_region(mapped_data_va, data_map_size);
		return -EINVAL;
	}

	image_ptr = (uint8_t *)data_va + sizeof(optee_header_t) +
		    sizeof(optee_image_t);
	if (image_header->arch == 1) {
		opteed_rw = OPTEE_AARCH64;
	} else {
		opteed_rw = OPTEE_AARCH32;
	}

	curr_image = &image_header->optee_image_list[0];
	image_pa = dual32to64(curr_image->load_addr_hi,
			      curr_image->load_addr_lo);
	image_va = image_pa;
	target_end_pa = image_pa + curr_image->size;

	/* Now also map the memory we want to copy it to. */
	target_pa = page_align(image_pa, DOWN);
	target_va = target_pa;
	target_size = page_align(target_end_pa, UP) - target_pa;

	rc = mmap_add_dynamic_region(target_pa, target_va, target_size,
				     MT_MEMORY | MT_RW | MT_SECURE);
	if (rc != 0) {
		mmap_remove_dynamic_region(mapped_data_va, data_map_size);
		return rc;
	}

	INFO("Loaded OP-TEE via SMC: size %d addr 0x%" PRIx64 "\n",
	     curr_image->size, image_va);

	memcpy((void *)image_va, image_ptr, curr_image->size);
	flush_dcache_range(target_pa, target_size);

	mmap_remove_dynamic_region(mapped_data_va, data_map_size);
	mmap_remove_dynamic_region(target_va, target_size);

	/* Save the non-secure state */
	cm_el1_sysregs_context_save(NON_SECURE);

	rc = create_opteed_dt();
	if (rc) {
		ERROR("Failed device tree creation %d\n", rc);
		return rc;
	}
	dt_addr = (uint64_t)fdt_buf;
	flush_dcache_range(dt_addr, OPTEED_FDT_SIZE);

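	/*
	 * Prefer the transfer list handoff when available: the device tree is
	 * passed through the transfer list entry (pointer in arg0 for AArch64,
	 * arg2 for AArch32, signature in arg1, list address in arg3).
	 * Otherwise fall back to passing the device tree address directly in
	 * arg2.
	 */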
	if (TRANSFER_LIST &&
	    !create_smc_tl((void *)dt_addr, OPTEED_FDT_SIZE)) {
		struct transfer_list_entry *te = NULL;
		void *dt = NULL;

		te = transfer_list_find(bl31_tl, TL_TAG_FDT);
		dt = transfer_list_entry_data(te);

		if (opteed_rw == OPTEE_AARCH64) {
			arg0 = (uint64_t)dt;
			arg2 = 0;
		} else {
			arg2 = (uint64_t)dt;
			arg0 = 0;
		}
		arg1 = TRANSFER_LIST_SIGNATURE |
		       REGISTER_CONVENTION_VERSION_MASK;
		arg3 = (uint64_t)bl31_tl;
	} else {
		/* Default handoff arguments */
		arg2 = dt_addr;
	}

	opteed_init_optee_ep_state(&optee_ep_info,
				   opteed_rw,
				   image_pa,
				   arg0,
				   arg1,
				   arg2,
				   arg3,
				   &opteed_sp_context[linear_id]);
	if (opteed_init_with_entry_point(&optee_ep_info) == 0) {
		rc = -EFAULT;
	}

	/* Restore non-secure state */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	return rc;
}
#endif /* OPTEE_ALLOW_SMC_LOAD */

/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure
 * payload to delegate work and return results back to the non-secure
 * state. Lastly it will also return any information that OPTEE needs to do
 * the work assigned to it.
 ******************************************************************************/
static uintptr_t opteed_smc_handler(uint32_t smc_fid,
			 u_register_t x1,
			 u_register_t x2,
			 u_register_t x3,
			 u_register_t x4,
			 void *cookie,
			 void *handle,
			 u_register_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos();
	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
	uint64_t rc;

	/*
	 * Determine which security state this SMC originated from
	 */

	if (is_caller_non_secure(flags)) {
#if OPTEE_ALLOW_SMC_LOAD
		if (opteed_allow_load && smc_fid == NSSMC_OPTEED_CALL_UID) {
			/* Provide the UUID of the image loading service. */
			SMC_UUID_RET(handle, optee_image_load_uuid);
		}
		if (smc_fid == NSSMC_OPTEED_CALL_LOAD_IMAGE) {
			/*
			 * TODO: Consider wiping the code for SMC loading from
			 * memory after it has been invoked similar to what is
			 * done under RECLAIM_INIT, but extended to happen
			 * later.
			 */
			if (!opteed_allow_load) {
				SMC_RET1(handle, -EPERM);
			}

			opteed_allow_load = false;
			uint64_t data_size = dual32to64(x1, x2);
			uint64_t data_pa = dual32to64(x3, x4);
			if (!data_size || !data_pa) {
				/*
				 * This is invoked when the OP-TEE image didn't
				 * load correctly in the kernel but we want to
				 * block off loading of it later for security
				 * reasons.
				 */
				SMC_RET1(handle, -EINVAL);
			}
			SMC_RET1(handle, opteed_handle_smc_load(
					data_size, data_pa));
		}
#endif /* OPTEE_ALLOW_SMC_LOAD */
		/*
		 * This is a fresh request from the non-secure client.
		 * The parameters are in x1 and x2. Figure out which
		 * registers need to be preserved, save the non-secure
		 * state and send the request to the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * OP-TEE to do the work now. If we are loading via an SMC,
		 * then we also need to init this CPU context if not done
		 * already.
		 */
		if (optee_vector_table == NULL) {
			SMC_RET1(handle, -EINVAL);
		}

		if (get_optee_pstate(optee_ctx->state) ==
		    OPTEE_PSTATE_UNKNOWN) {
			opteed_cpu_on_finish_handler(0);
		}

		/*
		 * Verify if there is a valid context to use, copy the
		 * operation type and parameters to the secure context
		 * and jump to the fast smc entry point in the secure
		 * payload. Entry into S-EL1 will take place upon exit
		 * from this function.
		 */
		assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

		/* Set appropriate entry for SMC.
		 * We expect OPTEE to manage the PSTATE.I and PSTATE.F
		 * flags as appropriate.
		 */
		if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
			cm_set_elr_el3(SECURE, (uint64_t)
				&optee_vector_table->fast_smc_entry);
		} else {
			cm_set_elr_el3(SECURE, (uint64_t)
				&optee_vector_table->yield_smc_entry);
		}

		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);

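		/* Forward the remaining SMC arguments (x4-x6) to OP-TEE. */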
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X4,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X4));
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X5,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X5));
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X6,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X6));
		/* Propagate hypervisor client ID */
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X7,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X7));

		SMC_RET4(&optee_ctx->cpu_ctx, smc_fid, x1, x2, x3);
	}

	/*
	 * Returning from OPTEE
	 */

	switch (smc_fid) {
	/*
	 * OPTEE has finished initialising itself after a cold boot
	 */
	case TEESMC_OPTEED_RETURN_ENTRY_DONE:
		/*
		 * Stash the OPTEE entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(optee_vector_table == NULL);
		optee_vector_table = (optee_vectors_t *) x1;

		if (optee_vector_table) {
			set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_ON);

			/*
			 * OPTEE has been successfully initialized.
			 * Register power management hooks with PSCI
			 */
			psci_register_spd_pm_hook(&opteed_pm);

			/*
			 * Register an interrupt handler for S-EL1 interrupts
			 * when generated during code executing in the
			 * non-secure state.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, NON_SECURE);
			rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					opteed_sel1_interrupt_handler,
					flags);
			if (rc)
				panic();
		}

		/*
		 * OPTEE reports completion. The OPTEED must have initiated
		 * the original request through a synchronous entry into
		 * OPTEE. Jump back to the original C runtime context.
		 */
		opteed_synchronous_sp_exit(optee_ctx, x1);
		break;


	/*
	 * These function IDs are used only by OP-TEE to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TEESMC_OPTEED_RETURN_ON_DONE:
	case TEESMC_OPTEED_RETURN_RESUME_DONE:


	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TEESMC_OPTEED_RETURN_OFF_DONE:
	case TEESMC_OPTEED_RETURN_SUSPEND_DONE:
	case TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE:
	case TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE:

		/*
		 * OPTEE reports completion. The OPTEED must have initiated the
		 * original request through a synchronous entry into OPTEE.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		opteed_synchronous_sp_exit(optee_ctx, x1);
		break;

	/*
	 * OPTEE is returning from a call or being preempted from a call, in
	 * either case execution should resume in the normal world.
	 */
	case TEESMC_OPTEED_RETURN_CALL_DONE:
		/*
		 * This is the result from the secure client of an
		 * earlier request. The results are in x0-x3. Copy it
		 * into the non-secure context, save the secure state
		 * and return to the non-secure state.
		 */
		assert(handle == cm_get_context(SECURE));
		cm_el1_sysregs_context_save(SECURE);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/* Restore non-secure state */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET4(ns_cpu_context, x1, x2, x3, x4);

	/*
	 * OPTEE has finished handling a S-EL1 FIQ interrupt. Execution
	 * should resume in the normal world.
	 */
	case TEESMC_OPTEED_RETURN_FIQ_DONE:
		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since OPTEE was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET0((uint64_t) ns_cpu_context);

	default:
		panic();
	}
}

/* Define an OPTEED runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	opteed_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	opteed_setup,
	opteed_smc_handler
);

/* Define an OPTEED runtime service descriptor for yielding SMC calls */
DECLARE_RT_SVC(
	opteed_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
	NULL,
	opteed_smc_handler
);