/*
 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>

#include "../../services/std_svc/spm/el3_spmc/spmc.h"
#include "../../services/std_svc/spm/el3_spmc/spmc_shared_mem.h"
#include <arch_features.h>
#include <arch_helpers.h>
#include <bl32/tsp/tsp.h>
#include <common/bl_common.h>
#include <common/build_message.h>
#include <common/debug.h>
#include "ffa_helpers.h"
#include <lib/psci/psci.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
#include <platform_tsp.h>
#include <services/ffa_svc.h>
#include "tsp_private.h"

#include <platform_def.h>

static ffa_endpoint_id16_t tsp_id, spmc_id;
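
/*
 * Buffer used to reassemble retrieved memory region descriptors that may span
 * multiple RX buffer fragments (see test_memory_send()).
 */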
uint8_t mem_region_buffer[4096 * 2] __aligned(PAGE_SIZE);

/* Partition Mailbox. */
static uint8_t send_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static uint8_t recv_page[PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * Declare a global mailbox for use within the TSP.
 * This will be initialized appropriately when the buffers
 * are mapped with the SPMC.
 */
static struct mailbox mailbox;

/*******************************************************************************
 * This enum is used to handle test cases driven from the FF-A Test Driver.
 ******************************************************************************/
/* Keep in Sync with FF-A Test Driver. */
enum message_t {
	/* Partition Only Messages. */
	FF_A_RELAY_MESSAGE = 0,

	/* Basic Functionality. */
	FF_A_ECHO_MESSAGE,
	FF_A_RELAY_MESSAGE_EL3,

	/* Memory Sharing. */
	FF_A_MEMORY_SHARE,
	FF_A_MEMORY_SHARE_FRAGMENTED,
	FF_A_MEMORY_LEND,
	FF_A_MEMORY_LEND_FRAGMENTED,

	FF_A_MEMORY_SHARE_MULTI_ENDPOINT,
	FF_A_MEMORY_LEND_MULTI_ENDPOINT,

	LAST,
	FF_A_RUN_ALL = 255,
	FF_A_OP_MAX = 256
};

#if SPMC_AT_EL3
extern void tsp_cpu_on_entry(void);
#endif

/*******************************************************************************
 * Test Functions.
 ******************************************************************************/

/*******************************************************************************
 * Enable the TSP to forward the received message to another partition and ask
 * it to echo the value back in order to validate direct messaging functionality.
 ******************************************************************************/
static int ffa_test_relay(uint64_t arg0,
			  uint64_t arg1,
			  uint64_t arg2,
			  uint64_t arg3,
			  uint64_t arg4,
			  uint64_t arg5,
			  uint64_t arg6,
			  uint64_t arg7)
{
	smc_args_t ffa_forward_result;
	ffa_endpoint_id16_t receiver = arg5;

	ffa_forward_result = ffa_msg_send_direct_req(tsp_id,
						     receiver,
						     FF_A_ECHO_MESSAGE, arg4,
						     0, 0, 0);
	return ffa_forward_result._regs[3];
}

/*******************************************************************************
 * This function handles memory management tests, currently share and lend.
 * This test supports the use of FRAG_RX to handle memory descriptors that do
 * not fit in a single 4KB buffer.
 ******************************************************************************/
static int test_memory_send(ffa_endpoint_id16_t sender, uint64_t handle,
			    ffa_mtd_flag32_t flags, bool multi_endpoint)
{
	struct ffa_mtd *m;
	struct ffa_emad_v1_0 *receivers;
	struct ffa_comp_mrd *composite;
	int ret, status = 0;
	unsigned int mem_attrs;
	char *ptr;
	ffa_endpoint_id16_t source = sender;
	uint32_t total_length, recv_length = 0;

	/*
	 * In the case that we're testing multiple endpoints, choose a partition
	 * ID that resides in the normal world so the SPMC won't detect it as
	 * invalid.
	 * TODO: Should get endpoint receiver id and flag as input from NWd.
	 */
	uint32_t receiver_count = multi_endpoint ? 2 : 1;
	ffa_endpoint_id16_t test_receivers[2] = { tsp_id, 0x10 };

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(sender)) {
		ERROR("Invalid sender ID 0x%x.\n", sender);
		return FFA_ERROR_DENIED;
	}

	if (!memory_retrieve(&mailbox, &m, handle, source, test_receivers,
			     receiver_count, flags, &recv_length,
			     &total_length)) {
		return FFA_ERROR_INVALID_PARAMETER;
	}

	receivers = (struct ffa_emad_v1_0 *)
		    ((uint8_t *) m + m->emad_offset);
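
	/*
	 * If the descriptor did not fit in a single RX buffer, keep calling
	 * FFA_MEM_FRAG_RX and append each returned fragment to
	 * mem_region_buffer until the full transaction descriptor has been
	 * received.
	 */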
	while (total_length != recv_length) {
		smc_args_t ffa_return;
		uint32_t frag_length;

		ffa_return = ffa_mem_frag_rx(handle, recv_length);

		if (ffa_return._regs[0] == FFA_ERROR) {
			WARN("TSP: failed to resume mem with handle %lx\n",
			     handle);
			return ffa_return._regs[2];
		}
		frag_length = ffa_return._regs[3];

		/* Validate frag_length is less than total_length and mailbox size. */
		if (frag_length > total_length ||
		    frag_length > (mailbox.rxtx_page_count * PAGE_SIZE)) {
			ERROR("Invalid parameters!\n");
			return FFA_ERROR_INVALID_PARAMETER;
		}

		/* Validate frag_length is less than remaining mem_region_buffer size. */
		if (frag_length + recv_length >= REGION_BUF_SIZE) {
			ERROR("Out of memory!\n");
			return FFA_ERROR_INVALID_PARAMETER;
		}

		memcpy(&mem_region_buffer[recv_length], mailbox.rx_buffer,
		       frag_length);

		if (ffa_rx_release()) {
			ERROR("Failed to release buffer!\n");
			return FFA_ERROR_DENIED;
		}

		recv_length += frag_length;

		assert(recv_length <= total_length);
	}

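	/*
	 * The complete descriptor has now been received; locate the composite
	 * memory region descriptor listing the constituent address ranges.
	 */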
	composite = ffa_memory_region_get_composite(m, 0);
	if (composite == NULL) {
		WARN("Failed to get composite descriptor!\n");
		return FFA_ERROR_INVALID_PARAMETER;
	}

	VERBOSE("Address: %p; page_count: %x %lx\n",
		(void *)composite->address_range_array[0].address,
		composite->address_range_array[0].page_count, PAGE_SIZE);

	/* This test is only concerned with RW permissions. */
	if (ffa_get_data_access_attr(
	    receivers[0].mapd.memory_access_permissions) != FFA_MEM_PERM_RW) {
		ERROR("Data permission in retrieve response %x does not match share/lend %x!\n",
		      ffa_get_data_access_attr(receivers[0].mapd.memory_access_permissions),
		      FFA_MEM_PERM_RW);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	mem_attrs = MT_RW_DATA | MT_EXECUTE_NEVER;

	/* Only expecting to be sent memory from NWd so map accordingly. */
	mem_attrs |= MT_NS;

	for (int32_t i = 0; i < (int32_t)composite->address_range_count; i++) {
		size_t size = composite->address_range_array[i].page_count * PAGE_SIZE;

		ptr = (char *) composite->address_range_array[i].address;
		ret = mmap_add_dynamic_region(
				(uint64_t)ptr,
				(uint64_t)ptr,
				size, mem_attrs);

		if (ret != 0) {
			ERROR("Failed [%d] mmap_add_dynamic_region %u (%lx) (%lx) (%x)!\n",
			      i, ret,
			      (uint64_t)composite->address_range_array[i].address,
			      size, mem_attrs);

			/* Remove mappings previously created in this transaction. */
			for (i--; i >= 0; i--) {
				ret = mmap_remove_dynamic_region(
					(uint64_t)composite->address_range_array[i].address,
					composite->address_range_array[i].page_count * PAGE_SIZE);

				if (ret != 0) {
					ERROR("Failed [%d] mmap_remove_dynamic_region!\n", i);
					panic();
				}
			}

			return FFA_ERROR_NO_MEMORY;
		}

		/* Increment memory region for validation purposes. */
		++(*ptr);

		/*
		 * Read initial magic number from memory region for
		 * validation purposes.
		 */
		if (!i) {
			status = *ptr;
		}
	}

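	/* Tear down the dynamic mappings now that the region has been validated. */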
	for (uint32_t i = 0U; i < composite->address_range_count; i++) {
		ret = mmap_remove_dynamic_region(
			(uint64_t)composite->address_range_array[i].address,
			composite->address_range_array[i].page_count * PAGE_SIZE);

		if (ret != 0) {
			ERROR("Failed [%d] mmap_remove_dynamic_region!\n", i);
			return FFA_ERROR_NO_MEMORY;
		}
	}

	if (!memory_relinquish((struct ffa_mem_relinquish_descriptor *)mailbox.tx_buffer,
			       m->handle, tsp_id)) {
		ERROR("Failed to relinquish memory region!\n");
		return FFA_ERROR_INVALID_PARAMETER;
	}
	return status;
}

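/*
 * Construct the framework message used to acknowledge power management
 * requests from the SPMC: x1 carries the TSP (source) and SPMC (destination)
 * endpoint IDs, x2 the framework message bit together with the PM response
 * code.
 */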
static smc_args_t *send_ffa_pm_success(void)
{
	return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC32,
			    ((tsp_id & FFA_DIRECT_MSG_ENDPOINT_ID_MASK)
			     << FFA_DIRECT_MSG_SOURCE_SHIFT) | spmc_id,
			    FFA_FWK_MSG_BIT |
			    (FFA_PM_MSG_PM_RESP & FFA_FWK_MSG_MASK),
			    0, 0, 0, 0, 0);
}

/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * before this cpu is turned off in response to a psci cpu_off request.
 ******************************************************************************/
smc_args_t *tsp_cpu_off_main(uint64_t arg0,
			     uint64_t arg1,
			     uint64_t arg2,
			     uint64_t arg3,
			     uint64_t arg4,
			     uint64_t arg5,
			     uint64_t arg6,
			     uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/*
	 * This cpu is being turned off, so disable the timer to prevent the
	 * secure timer interrupt from interfering with power down. A pending
	 * interrupt will be lost but we do not care as we are turning off.
	 */
	tsp_generic_timer_stop();

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_off_count++;

	VERBOSE("TSP: cpu 0x%lx off request\n", read_mpidr());
	VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_off_count);

	return send_ffa_pm_success();
}

/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload before
 * this cpu's architectural state is saved in response to an earlier psci
 * cpu_suspend request.
 ******************************************************************************/
smc_args_t *tsp_cpu_suspend_main(uint64_t arg0,
				 uint64_t arg1,
				 uint64_t arg2,
				 uint64_t arg3,
				 uint64_t arg4,
				 uint64_t arg5,
				 uint64_t arg6,
				 uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/*
	 * Save the timer context and disable it to prevent the secure timer
	 * interrupt from interfering with wakeup from the suspend state.
	 */
	tsp_generic_timer_save();
	tsp_generic_timer_stop();

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_suspend_count++;

	VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_suspend_count);

	return send_ffa_pm_success();
}

/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload after this
 * cpu's architectural state has been restored after wakeup from an earlier psci
 * cpu_suspend request.
 ******************************************************************************/
smc_args_t *tsp_cpu_resume_main(uint64_t max_off_pwrlvl,
				uint64_t arg1,
				uint64_t arg2,
				uint64_t arg3,
				uint64_t arg4,
				uint64_t arg5,
				uint64_t arg6,
				uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Restore the generic timer context. */
	tsp_generic_timer_restore();

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_resume_count++;

	VERBOSE("TSP: cpu 0x%lx resumed. maximum off power level %" PRId64 "\n",
		read_mpidr(), max_off_pwrlvl);
	VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu resume requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_resume_count);

	return send_ffa_pm_success();
}

/*******************************************************************************
 * This function handles framework messages. Currently only PM.
 ******************************************************************************/
static smc_args_t *handle_framework_message(uint64_t arg0,
					    uint64_t arg1,
					    uint64_t arg2,
					    uint64_t arg3,
					    uint64_t arg4,
					    uint64_t arg5,
					    uint64_t arg6,
					    uint64_t arg7)
{
	/* Check if it is a power management message from the SPMC. */
	if (ffa_endpoint_source(arg1) != spmc_id) {
		goto err;
	}

	/* Check if it is a PM request message. */
	if ((arg2 & FFA_FWK_MSG_MASK) == FFA_FWK_MSG_PSCI) {
		/* Check if it is a PSCI CPU_OFF request. */
		if (arg3 == PSCI_CPU_OFF) {
			return tsp_cpu_off_main(arg0, arg1, arg2, arg3,
						arg4, arg5, arg6, arg7);
		} else if (arg3 == PSCI_CPU_SUSPEND_AARCH64) {
			return tsp_cpu_suspend_main(arg0, arg1, arg2, arg3,
						    arg4, arg5, arg6, arg7);
		}
	} else if ((arg2 & FFA_FWK_MSG_MASK) == FFA_PM_MSG_WB_REQ) {
		/* Check if it is a PSCI Warm Boot request. */
		if (arg3 == FFA_WB_TYPE_NOTS2RAM) {
			return tsp_cpu_resume_main(arg0, arg1, arg2, arg3,
						   arg4, arg5, arg6, arg7);
		}
	}

err:
	ERROR("%s: Unknown framework message!\n", __func__);
	panic();
}

/*******************************************************************************
 * Handles partition messages. Exercised from the FF-A Test Driver.
 ******************************************************************************/
static smc_args_t *handle_partition_message(uint64_t arg0,
					    uint64_t arg1,
					    uint64_t arg2,
					    uint64_t arg3,
					    uint64_t arg4,
					    uint64_t arg5,
					    uint64_t arg6,
					    uint64_t arg7)
{
	uint16_t sender = ffa_endpoint_source(arg1);
	uint16_t receiver = ffa_endpoint_destination(arg1);
	int status = -1;
	const bool multi_endpoint = true;

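	/*
	 * The requested test ID arrives in x3 of the direct request; x4
	 * carries its argument (a memory handle or the value to echo).
	 */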
	switch (arg3) {
	case FF_A_MEMORY_SHARE:
		INFO("TSP Tests: Memory Share Request--\n");
		status = test_memory_send(sender, arg4, FFA_FLAG_SHARE_MEMORY, !multi_endpoint);
		break;

	case FF_A_MEMORY_LEND:
		INFO("TSP Tests: Memory Lend Request--\n");
		status = test_memory_send(sender, arg4, FFA_FLAG_LEND_MEMORY, !multi_endpoint);
		break;

	case FF_A_MEMORY_SHARE_MULTI_ENDPOINT:
		INFO("TSP Tests: Multi Endpoint Memory Share Request--\n");
		status = test_memory_send(sender, arg4, FFA_FLAG_SHARE_MEMORY, multi_endpoint);
		break;

	case FF_A_MEMORY_LEND_MULTI_ENDPOINT:
		INFO("TSP Tests: Multi Endpoint Memory Lend Request--\n");
		status = test_memory_send(sender, arg4, FFA_FLAG_LEND_MEMORY, multi_endpoint);
		break;
	case FF_A_RELAY_MESSAGE:
		INFO("TSP Tests: Relaying message--\n");
		status = ffa_test_relay(arg0, arg1, arg2, arg3, arg4,
					arg5, arg6, arg7);
		break;

	case FF_A_ECHO_MESSAGE:
		INFO("TSP Tests: echo message--\n");
		status = arg4;
		break;

	default:
		INFO("TSP Tests: Unknown request ID %d--\n", (int) arg3);
	}

	/* Swap the sender and receiver in the response. */
	return ffa_msg_send_direct_resp(receiver, sender, status, 0, 0, 0, 0);
}

/*******************************************************************************
 * This function implements the event loop for handling FF-A ABI invocations.
 ******************************************************************************/
static smc_args_t *tsp_event_loop(uint64_t smc_fid,
				  uint64_t arg1,
				  uint64_t arg2,
				  uint64_t arg3,
				  uint64_t arg4,
				  uint64_t arg5,
				  uint64_t arg6,
				  uint64_t arg7)
{
	/* Panic if the SPMC did not forward an FF-A call. */
	if (!is_ffa_fid(smc_fid)) {
		ERROR("%s: Unknown SMC FID (0x%lx)\n", __func__, smc_fid);
		panic();
	}

	switch (smc_fid) {
	case FFA_INTERRUPT:
		/*
		 * IRQs were enabled upon re-entry into the TSP. The interrupt
		 * must have been handled by now. Return to the SPMC indicating
		 * the same.
		 */
		return set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0);

	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		/* Check if a framework message, handle accordingly. */
		if ((arg2 & FFA_FWK_MSG_BIT)) {
			return handle_framework_message(smc_fid, arg1, arg2, arg3,
							arg4, arg5, arg6, arg7);
		}
		return handle_partition_message(smc_fid, arg1, arg2, arg3,
						arg4, arg5, arg6, arg7);
	}

	ERROR("%s: Unsupported FF-A FID (0x%lx)\n", __func__, smc_fid);
	panic();
}

static smc_args_t *tsp_loop(smc_args_t *args)
{
	smc_args_t ret;

	do {
		/* --------------------------------------------
		 * Mask FIQ interrupts to avoid preemption in
		 * case the EL3 SPMC next delegates an IRQ or
		 * a managed exit. Lastly, unmask IRQs so that
		 * they can be handled immediately upon re-entry.
		 * ---------------------------------------------
		 */
		write_daifset(DAIF_FIQ_BIT);
		write_daifclr(DAIF_IRQ_BIT);
		ret = smc_helper(args->_regs[0], args->_regs[1], args->_regs[2],
				 args->_regs[3], args->_regs[4], args->_regs[5],
				 args->_regs[6], args->_regs[7]);
		args = tsp_event_loop(ret._regs[0], ret._regs[1], ret._regs[2],
				      ret._regs[3], ret._regs[4], ret._regs[5],
				      ret._regs[6], ret._regs[7]);
	} while (1);

	/* Not Reached. */
	return NULL;
}

/*******************************************************************************
 * TSP main entry point where it gets the opportunity to initialize its secure
 * state/applications. Once the state is initialized, it must return to the
 * SPD with a pointer to the 'tsp_vector_table' jump table.
 ******************************************************************************/
uint64_t tsp_main(void)
{
	smc_args_t smc_args = {0};

	NOTICE("TSP: %s\n", build_version_string);
	NOTICE("TSP: %s\n", build_message);
	INFO("TSP: Total memory base : 0x%lx\n", (unsigned long) BL32_BASE);
	INFO("TSP: Total memory size : 0x%lx bytes\n", BL32_TOTAL_SIZE);
	uint32_t linear_id = plat_my_core_pos();

	/* Initialize the platform. */
	tsp_platform_setup();

	/* Initialize secure/applications state here. */
	tsp_generic_timer_start();

	/* Register secondary entrypoint with the SPMC. */
	smc_args = smc_helper(FFA_SECONDARY_EP_REGISTER_SMC64,
			      (uint64_t) tsp_cpu_on_entry,
			      0, 0, 0, 0, 0, 0);
	if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
		ERROR("TSP could not register secondary ep (0x%lx)\n",
		      smc_args._regs[2]);
		panic();
	}
	/* Get TSP's endpoint id. */
	smc_args = smc_helper(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
	if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
		ERROR("TSP could not get own ID (0x%lx) on core%d\n",
		      smc_args._regs[2], linear_id);
		panic();
	}

	tsp_id = smc_args._regs[2];
	INFO("TSP FF-A endpoint id = 0x%x\n", tsp_id);

	/* Get the SPMC ID. */
	smc_args = smc_helper(FFA_SPM_ID_GET, 0, 0, 0, 0, 0, 0, 0);
	if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
		ERROR("TSP could not get SPMC ID (0x%lx) on core%d\n",
		      smc_args._regs[2], linear_id);
		panic();
	}

	spmc_id = smc_args._regs[2];

	/* Call RXTX_MAP to map a 4k RX and TX buffer. */
	if (ffa_rxtx_map((uintptr_t) send_page,
			 (uintptr_t) recv_page, 1)) {
		ERROR("TSP could not map its RX/TX buffers\n");
		panic();
	}

	mailbox.tx_buffer = send_page;
	mailbox.rx_buffer = recv_page;
	mailbox.rxtx_page_count = 1;

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;

	VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_on_count);

	/* Tell SPMD that we are done initialising. */
	tsp_loop(set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0));

	/* Not reached. */
	return 0;
}

/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * after this cpu's architectural state has been set up in response to an
 * earlier psci cpu_on request.
 ******************************************************************************/
smc_args_t *tsp_cpu_on_main(void)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Initialize secure/applications state here. */
	tsp_generic_timer_start();

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;
	VERBOSE("TSP: cpu 0x%lx turned on\n", read_mpidr());
	VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_on_count);
	/* ---------------------------------------------
	 * Jump to the main event loop to return to EL3
	 * and be ready for the next request on this cpu.
	 * ---------------------------------------------
	 */
	return tsp_loop(set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0));
657}