/*
 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>

#include "../../services/std_svc/spm/el3_spmc/spmc.h"
#include "../../services/std_svc/spm/el3_spmc/spmc_shared_mem.h"
#include <arch_features.h>
#include <arch_helpers.h>
#include <bl32/tsp/tsp.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include "ffa_helpers.h"
#include <lib/psci/psci.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
#include <platform_tsp.h>
#include <services/ffa_svc.h>
#include "tsp_private.h"

#include <platform_def.h>

static ffa_endpoint_id16_t tsp_id, spmc_id;
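
/*
 * Buffer used to hold memory transaction descriptors retrieved from the SPMC.
 * Descriptors that do not fit in the RX buffer are reassembled here fragment
 * by fragment.
 */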
uint8_t mem_region_buffer[4096 * 2] __aligned(PAGE_SIZE);

/* Partition Mailbox. */
static uint8_t send_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static uint8_t recv_page[PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * Declare a global mailbox for use within the TSP.
 * This will be initialized appropriately when the buffers
 * are mapped with the SPMC.
 */
static struct mailbox mailbox;

/*******************************************************************************
 * This enum is used to handle test cases driven from the FF-A Test Driver.
 ******************************************************************************/
/* Keep in Sync with FF-A Test Driver. */
enum message_t {
	/* Partition Only Messages. */
	FF_A_RELAY_MESSAGE = 0,

	/* Basic Functionality. */
	FF_A_ECHO_MESSAGE,
	FF_A_RELAY_MESSAGE_EL3,

	/* Memory Sharing. */
	FF_A_MEMORY_SHARE,
	FF_A_MEMORY_SHARE_FRAGMENTED,
	FF_A_MEMORY_LEND,
	FF_A_MEMORY_LEND_FRAGMENTED,

	FF_A_MEMORY_SHARE_MULTI_ENDPOINT,
	FF_A_MEMORY_LEND_MULTI_ENDPOINT,

	LAST,
	FF_A_RUN_ALL = 255,
	FF_A_OP_MAX = 256
};

#if SPMC_AT_EL3
extern void tsp_cpu_on_entry(void);
#endif

/*******************************************************************************
 * Test Functions.
 ******************************************************************************/

/*******************************************************************************
 * Enable the TSP to forward the received message to another partition and ask
 * it to echo the value back, in order to validate direct messaging
 * functionality.
 ******************************************************************************/
static int ffa_test_relay(uint64_t arg0,
			  uint64_t arg1,
			  uint64_t arg2,
			  uint64_t arg3,
			  uint64_t arg4,
			  uint64_t arg5,
			  uint64_t arg6,
			  uint64_t arg7)
{
	smc_args_t ffa_forward_result;
	ffa_endpoint_id16_t receiver = arg5;

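	/*
	 * Forward the value in arg4 to the endpoint named in arg5 as an echo
	 * request and return the value that comes back in the direct response.
	 */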
	ffa_forward_result = ffa_msg_send_direct_req(tsp_id,
						     receiver,
						     FF_A_ECHO_MESSAGE, arg4,
						     0, 0, 0);
	return ffa_forward_result._regs[3];
}

/*******************************************************************************
 * This function handles memory management tests, currently share and lend.
 * This test supports the use of FFA_MEM_FRAG_RX to retrieve memory descriptors
 * that do not fit in a single 4KB buffer.
 ******************************************************************************/
static int test_memory_send(ffa_endpoint_id16_t sender, uint64_t handle,
			    ffa_mtd_flag32_t flags, bool multi_endpoint)
{
	struct ffa_mtd *m;
	struct ffa_emad_v1_0 *receivers;
	struct ffa_comp_mrd *composite;
	int ret, status = 0;
	unsigned int mem_attrs;
	char *ptr;
	ffa_endpoint_id16_t source = sender;
	uint32_t total_length, recv_length = 0;

	/*
	 * In the case that we're testing multiple endpoints, choose a
	 * partition ID that resides in the normal world so the SPMC won't
	 * detect it as invalid.
	 * TODO: Should get endpoint receiver id and flag as input from NWd.
	 */
	uint32_t receiver_count = multi_endpoint ? 2 : 1;
	ffa_endpoint_id16_t test_receivers[2] = { tsp_id, 0x10 };

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(sender)) {
		ERROR("Invalid sender ID 0x%x.\n", sender);
		return FFA_ERROR_DENIED;
	}

	if (!memory_retrieve(&mailbox, &m, handle, source, test_receivers,
			     receiver_count, flags, &recv_length,
			     &total_length)) {
		return FFA_ERROR_INVALID_PARAMETER;
	}

	receivers = (struct ffa_emad_v1_0 *)
		    ((uint8_t *) m + m->emad_offset);
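
	/*
	 * If the descriptor did not fit in a single fragment, keep calling
	 * FFA_MEM_FRAG_RX and append each fragment to mem_region_buffer until
	 * the full descriptor has been received.
	 */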
	while (total_length != recv_length) {
		smc_args_t ffa_return;
		uint32_t frag_length;

		ffa_return = ffa_mem_frag_rx(handle, recv_length);

		if (ffa_return._regs[0] == FFA_ERROR) {
			WARN("TSP: failed to resume mem with handle %lx\n",
			     handle);
			return ffa_return._regs[2];
		}
		frag_length = ffa_return._regs[3];

		/* Validate frag_length is less than total_length and mailbox size. */
		if (frag_length > total_length ||
		    frag_length > (mailbox.rxtx_page_count * PAGE_SIZE)) {
			ERROR("Invalid parameters!\n");
			return FFA_ERROR_INVALID_PARAMETER;
		}

		/* Validate frag_length is less than remaining mem_region_buffer size. */
		if (frag_length + recv_length >= REGION_BUF_SIZE) {
			ERROR("Out of memory!\n");
			return FFA_ERROR_INVALID_PARAMETER;
		}

		memcpy(&mem_region_buffer[recv_length], mailbox.rx_buffer,
		       frag_length);

		if (ffa_rx_release()) {
			ERROR("Failed to release buffer!\n");
			return FFA_ERROR_DENIED;
		}

		recv_length += frag_length;

		assert(recv_length <= total_length);
	}

	composite = ffa_memory_region_get_composite(m, 0);
	if (composite == NULL) {
		WARN("Failed to get composite descriptor!\n");
		return FFA_ERROR_INVALID_PARAMETER;
	}

	VERBOSE("Address: %p; page_count: %x %lx\n",
		(void *)composite->address_range_array[0].address,
		composite->address_range_array[0].page_count, PAGE_SIZE);

	/* This test is only concerned with RW permissions. */
	if (ffa_get_data_access_attr(
	    receivers[0].mapd.memory_access_permissions) != FFA_MEM_PERM_RW) {
		ERROR("Data permission in retrieve response %x does not match share/lend %x!\n",
		      ffa_get_data_access_attr(receivers[0].mapd.memory_access_permissions),
		      FFA_MEM_PERM_RW);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	mem_attrs = MT_RW_DATA | MT_EXECUTE_NEVER;

	/* Only expecting to be sent memory from NWd so map accordingly. */
	mem_attrs |= MT_NS;

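	/*
	 * Map each constituent region described by the composite descriptor,
	 * write to it to prove access, and record the first byte of the first
	 * region as the status value returned to the test driver.
	 */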
	for (int32_t i = 0; i < (int32_t)composite->address_range_count; i++) {
		size_t size = composite->address_range_array[i].page_count * PAGE_SIZE;

		ptr = (char *) composite->address_range_array[i].address;
		ret = mmap_add_dynamic_region(
				(uint64_t)ptr,
				(uint64_t)ptr,
				size, mem_attrs);

		if (ret != 0) {
			ERROR("Failed [%d] mmap_add_dynamic_region %u (%lx) (%lx) (%x)!\n",
			      i, ret,
			      (uint64_t)composite->address_range_array[i].address,
			      size, mem_attrs);

			/* Remove mappings previously created in this transaction. */
			for (i--; i >= 0; i--) {
				ret = mmap_remove_dynamic_region(
					(uint64_t)composite->address_range_array[i].address,
					composite->address_range_array[i].page_count * PAGE_SIZE);

				if (ret != 0) {
					ERROR("Failed [%d] mmap_remove_dynamic_region!\n", i);
					panic();
				}
			}

			return FFA_ERROR_NO_MEMORY;
		}

		/* Increment memory region for validation purposes. */
		++(*ptr);

		/*
		 * Read initial magic number from memory region for
		 * validation purposes.
		 */
		if (!i) {
			status = *ptr;
		}
	}

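	/* All regions validated; remove the dynamic mappings again. */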
	for (uint32_t i = 0U; i < composite->address_range_count; i++) {
		ret = mmap_remove_dynamic_region(
			(uint64_t)composite->address_range_array[i].address,
			composite->address_range_array[i].page_count * PAGE_SIZE);

		if (ret != 0) {
			ERROR("Failed [%d] mmap_remove_dynamic_region!\n", i);
			return FFA_ERROR_NO_MEMORY;
		}
	}

	if (!memory_relinquish((struct ffa_mem_relinquish_descriptor *)mailbox.tx_buffer,
			       m->handle, tsp_id)) {
		ERROR("Failed to relinquish memory region!\n");
		return FFA_ERROR_INVALID_PARAMETER;
	}
	return status;
}

static smc_args_t *send_ffa_pm_success(void)
{
	return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC32,
			    ((tsp_id & FFA_DIRECT_MSG_ENDPOINT_ID_MASK)
			     << FFA_DIRECT_MSG_SOURCE_SHIFT) | spmc_id,
			    FFA_FWK_MSG_BIT |
			    (FFA_PM_MSG_PM_RESP & FFA_FWK_MSG_MASK),
			    0, 0, 0, 0, 0);
}

/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * before this cpu is turned off in response to a psci cpu_off request.
 ******************************************************************************/
smc_args_t *tsp_cpu_off_main(uint64_t arg0,
			     uint64_t arg1,
			     uint64_t arg2,
			     uint64_t arg3,
			     uint64_t arg4,
			     uint64_t arg5,
			     uint64_t arg6,
			     uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/*
	 * This cpu is being turned off, so disable the timer to prevent the
	 * secure timer interrupt from interfering with power down. A pending
	 * interrupt will be lost but we do not care as we are turning off.
	 */
	tsp_generic_timer_stop();

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_off_count++;

	VERBOSE("TSP: cpu 0x%lx off request\n", read_mpidr());
	VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_off_count);

	return send_ffa_pm_success();
}

/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload before
 * this cpu's architectural state is saved in response to an earlier psci
 * cpu_suspend request.
 ******************************************************************************/
smc_args_t *tsp_cpu_suspend_main(uint64_t arg0,
				 uint64_t arg1,
				 uint64_t arg2,
				 uint64_t arg3,
				 uint64_t arg4,
				 uint64_t arg5,
				 uint64_t arg6,
				 uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/*
	 * Save the timer context and disable it to prevent the secure timer
	 * interrupt from interfering with wakeup from the suspend state.
	 */
	tsp_generic_timer_save();
	tsp_generic_timer_stop();

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_suspend_count++;

	VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_suspend_count);

	return send_ffa_pm_success();
}

/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload after this
 * cpu's architectural state has been restored after wakeup from an earlier psci
 * cpu_suspend request.
 ******************************************************************************/
smc_args_t *tsp_cpu_resume_main(uint64_t max_off_pwrlvl,
				uint64_t arg1,
				uint64_t arg2,
				uint64_t arg3,
				uint64_t arg4,
				uint64_t arg5,
				uint64_t arg6,
				uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Restore the generic timer context. */
	tsp_generic_timer_restore();

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_resume_count++;

	VERBOSE("TSP: cpu 0x%lx resumed. maximum off power level %" PRId64 "\n",
		read_mpidr(), max_off_pwrlvl);
	VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu resume requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_resume_count);

	return send_ffa_pm_success();
}

/*******************************************************************************
 * This function handles framework messages. Currently only PM.
 ******************************************************************************/
static smc_args_t *handle_framework_message(uint64_t arg0,
					    uint64_t arg1,
					    uint64_t arg2,
					    uint64_t arg3,
					    uint64_t arg4,
					    uint64_t arg5,
					    uint64_t arg6,
					    uint64_t arg7)
{
	/* Check if it is a power management message from the SPMC. */
	if (ffa_endpoint_source(arg1) != spmc_id) {
		goto err;
	}

	/* Check if it is a PM request message. */
	if ((arg2 & FFA_FWK_MSG_MASK) == FFA_FWK_MSG_PSCI) {
		/* Check if it is a PSCI CPU_OFF request. */
		if (arg3 == PSCI_CPU_OFF) {
			return tsp_cpu_off_main(arg0, arg1, arg2, arg3,
						arg4, arg5, arg6, arg7);
		} else if (arg3 == PSCI_CPU_SUSPEND_AARCH64) {
			return tsp_cpu_suspend_main(arg0, arg1, arg2, arg3,
						    arg4, arg5, arg6, arg7);
		}
	} else if ((arg2 & FFA_FWK_MSG_MASK) == FFA_PM_MSG_WB_REQ) {
		/* Check if it is a PSCI Warm Boot request. */
		if (arg3 == FFA_WB_TYPE_NOTS2RAM) {
			return tsp_cpu_resume_main(arg0, arg1, arg2, arg3,
						   arg4, arg5, arg6, arg7);
		}
	}

err:
	ERROR("%s: Unknown framework message!\n", __func__);
	panic();
}

/*******************************************************************************
 * Handles partition messages. Exercised from the FF-A Test Driver.
 ******************************************************************************/
static smc_args_t *handle_partition_message(uint64_t arg0,
					    uint64_t arg1,
					    uint64_t arg2,
					    uint64_t arg3,
					    uint64_t arg4,
					    uint64_t arg5,
					    uint64_t arg6,
					    uint64_t arg7)
{
	uint16_t sender = ffa_endpoint_source(arg1);
	uint16_t receiver = ffa_endpoint_destination(arg1);
	int status = -1;
	const bool multi_endpoint = true;

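	/*
	 * Dispatch on the message ID in arg3, which is kept in sync with the
	 * FF-A Test Driver's message_t values. For the memory tests arg4
	 * carries the memory transaction handle; for the echo test it carries
	 * the value to echo back.
	 */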
	switch (arg3) {
	case FF_A_MEMORY_SHARE:
		INFO("TSP Tests: Memory Share Request--\n");
		status = test_memory_send(sender, arg4, FFA_FLAG_SHARE_MEMORY, !multi_endpoint);
		break;

	case FF_A_MEMORY_LEND:
		INFO("TSP Tests: Memory Lend Request--\n");
		status = test_memory_send(sender, arg4, FFA_FLAG_LEND_MEMORY, !multi_endpoint);
		break;

	case FF_A_MEMORY_SHARE_MULTI_ENDPOINT:
		INFO("TSP Tests: Multi Endpoint Memory Share Request--\n");
		status = test_memory_send(sender, arg4, FFA_FLAG_SHARE_MEMORY, multi_endpoint);
		break;

	case FF_A_MEMORY_LEND_MULTI_ENDPOINT:
		INFO("TSP Tests: Multi Endpoint Memory Lend Request--\n");
		status = test_memory_send(sender, arg4, FFA_FLAG_LEND_MEMORY, multi_endpoint);
		break;
	case FF_A_RELAY_MESSAGE:
		INFO("TSP Tests: Relaying message--\n");
		status = ffa_test_relay(arg0, arg1, arg2, arg3, arg4,
					arg5, arg6, arg7);
		break;

	case FF_A_ECHO_MESSAGE:
		INFO("TSP Tests: echo message--\n");
		status = arg4;
		break;

	default:
		INFO("TSP Tests: Unknown request ID %d--\n", (int) arg3);
	}

	/* Swap the sender and receiver in the response. */
	return ffa_msg_send_direct_resp(receiver, sender, status, 0, 0, 0, 0);
}

/*******************************************************************************
 * This function implements the event loop for handling FF-A ABI invocations.
 ******************************************************************************/
static smc_args_t *tsp_event_loop(uint64_t smc_fid,
				  uint64_t arg1,
				  uint64_t arg2,
				  uint64_t arg3,
				  uint64_t arg4,
				  uint64_t arg5,
				  uint64_t arg6,
				  uint64_t arg7)
{
	/* Panic if the SPMC did not forward an FF-A call. */
	if (!is_ffa_fid(smc_fid)) {
		ERROR("%s: Unknown SMC FID (0x%lx)\n", __func__, smc_fid);
		panic();
	}

	switch (smc_fid) {
	case FFA_INTERRUPT:
		/*
		 * IRQs were enabled upon re-entry into the TSP. The interrupt
		 * must have been handled by now. Return to the SPMC indicating
		 * the same.
		 */
		return set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0);

	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		/* Check if a framework message, handle accordingly. */
		if ((arg2 & FFA_FWK_MSG_BIT)) {
			return handle_framework_message(smc_fid, arg1, arg2, arg3,
							arg4, arg5, arg6, arg7);
		}
		return handle_partition_message(smc_fid, arg1, arg2, arg3,
						arg4, arg5, arg6, arg7);
	}

	ERROR("%s: Unsupported FF-A FID (0x%lx)\n", __func__, smc_fid);
	panic();
}

static smc_args_t *tsp_loop(smc_args_t *args)
{
	smc_args_t ret;

	do {
		/* --------------------------------------------
		 * Mask FIQ interrupts to avoid preemption in
		 * case the EL3 SPMC delegates an IRQ or a
		 * managed exit next. Lastly, unmask IRQs so
		 * that they can be handled immediately upon
		 * re-entry.
		 * ---------------------------------------------
		 */
		write_daifset(DAIF_FIQ_BIT);
		write_daifclr(DAIF_IRQ_BIT);
		ret = smc_helper(args->_regs[0], args->_regs[1], args->_regs[2],
				 args->_regs[3], args->_regs[4], args->_regs[5],
				 args->_regs[6], args->_regs[7]);
		args = tsp_event_loop(ret._regs[0], ret._regs[1], ret._regs[2],
				      ret._regs[3], ret._regs[4], ret._regs[5],
				      ret._regs[6], ret._regs[7]);
	} while (1);

	/* Not Reached. */
	return NULL;
}

/*******************************************************************************
 * TSP main entry point where it gets the opportunity to initialize its secure
 * state/applications. Once the state is initialized, it must return to the
 * SPD with a pointer to the 'tsp_vector_table' jump table.
 ******************************************************************************/
uint64_t tsp_main(void)
{
	smc_args_t smc_args = {0};

	NOTICE("TSP: %s\n", version_string);
	NOTICE("TSP: %s\n", build_message);
	INFO("TSP: Total memory base : 0x%lx\n", (unsigned long) BL32_BASE);
	INFO("TSP: Total memory size : 0x%lx bytes\n", BL32_TOTAL_SIZE);
	uint32_t linear_id = plat_my_core_pos();

	/* Initialize the platform. */
	tsp_platform_setup();

	/* Initialize secure/applications state here. */
	tsp_generic_timer_start();

	/* Register secondary entrypoint with the SPMC. */
	smc_args = smc_helper(FFA_SECONDARY_EP_REGISTER_SMC64,
			      (uint64_t) tsp_cpu_on_entry,
			      0, 0, 0, 0, 0, 0);
	if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
		ERROR("TSP could not register secondary ep (0x%lx)\n",
		      smc_args._regs[2]);
		panic();
	}
	/* Get TSP's endpoint id. */
	smc_args = smc_helper(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
	if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
		ERROR("TSP could not get own ID (0x%lx) on core%d\n",
		      smc_args._regs[2], linear_id);
		panic();
	}

	tsp_id = smc_args._regs[2];
	INFO("TSP FF-A endpoint id = 0x%x\n", tsp_id);

	/* Get the SPMC ID. */
	smc_args = smc_helper(FFA_SPM_ID_GET, 0, 0, 0, 0, 0, 0, 0);
	if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
		ERROR("TSP could not get SPMC ID (0x%lx) on core%d\n",
		      smc_args._regs[2], linear_id);
		panic();
	}

	spmc_id = smc_args._regs[2];

	/* Call RXTX_MAP to map a 4k RX and TX buffer. */
	if (ffa_rxtx_map((uintptr_t) send_page,
			 (uintptr_t) recv_page, 1)) {
		ERROR("TSP could not map its RX/TX buffers\n");
		panic();
	}

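	/* Record the mapped buffers in the mailbox used by the memory sharing helpers. */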
	mailbox.tx_buffer = send_page;
	mailbox.rx_buffer = recv_page;
	mailbox.rxtx_page_count = 1;

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;

	VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_on_count);

	/* Tell SPMD that we are done initialising. */
	tsp_loop(set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0));

	/* Not reached. */
	return 0;
}

/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * after this cpu's architectural state has been set up in response to an
 * earlier psci cpu_on request.
 ******************************************************************************/
smc_args_t *tsp_cpu_on_main(void)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Initialize secure/applications state here. */
	tsp_generic_timer_start();

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;
	VERBOSE("TSP: cpu 0x%lx turned on\n", read_mpidr());
	VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_on_count);
	/* ---------------------------------------------
	 * Jump to the main event loop to return to EL3
	 * and be ready for the next request on this cpu.
	 * ---------------------------------------------
	 */
	return tsp_loop(set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0));
}