/*
 * Copyright (c) 2021-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/gpt_rme/gpt_rme.h>

#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/rmmd_svc.h>
#include <smccc_helpers.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/trbe.h>
#include "rmmd_initial_context.h"
#include "rmmd_private.h"

/*******************************************************************************
 * RMM boot failure flag
 ******************************************************************************/
static bool rmm_boot_failed;

/*******************************************************************************
 * RMM context information.
 ******************************************************************************/
rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * RMM entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *rmm_ep_info;

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t rmm_init(void);

/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Restore the realm context assigned above */
	cm_el2_sysregs_context_restore(REALM);
	cm_set_next_eret_context(REALM);

	/* Enter RMM */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/*
	 * Save realm context. EL2 Non-secure context will be restored
	 * before exiting Non-secure world, therefore there is no need
	 * to clear EL2 context registers.
	 */
	cm_el2_sysregs_context_save(REALM);

	return rc;
}

/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	/* Get context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into RMM. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

	panic();
}

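/*
 * Program the minimal EL2 system register state (SPSR_EL2 and SCTLR_EL2)
 * used for the initial entry into the RMM.
 */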
static void rmm_el2_context_init(el2_sysregs_t *regs)
{
	write_el2_ctx_common(regs, spsr_el2, REALM_SPSR_EL2);
	write_el2_ctx_common(regs, sctlr_el2, SCTLR_EL2_RES1);
}

/*******************************************************************************
 * Enable architecture extensions on first entry to Realm world.
 ******************************************************************************/

static void manage_extensions_realm(cpu_context_t *ctx)
{
	/*
	 * Enable access to TPIDR2_EL0 if SME/SME2 is enabled for the
	 * Non-secure world.
	 */
	if (is_feat_sme_supported()) {
		sme_enable(ctx);
	}

	/*
	 * SPE and TRBE cannot be fully disabled from EL3 registers alone;
	 * only system register access to them can be. In case the EL1
	 * controls leave them active across the context switch, we want the
	 * owning security state to be NS so the Realm world cannot be DoSed.
	 */
	if (is_feat_spe_supported()) {
		spe_disable(ctx);
	}

	if (is_feat_trbe_supported()) {
		trbe_disable(ctx);
	}
}

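/*******************************************************************************
 * Configure the per-world context for the Realm world: initialise the common
 * EL3 per-world state and set up the SVE, trace, SME and MPAM controls that
 * apply to every Realm entry.
 ******************************************************************************/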
static void manage_extensions_realm_per_world(void)
{
	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_REALM]);

	if (is_feat_sve_supported()) {
		/*
		 * Enable SVE and FPU in the realm context when it is enabled
		 * for NS. The Realm manager must ensure that the SVE and FPU
		 * register contexts are properly managed.
		 */
		sve_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}

	/* NS can access this but Realm shouldn't */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}

	/*
	 * If SME/SME2 is supported and enabled for the NS world, then disable
	 * trapping of SME instructions for the Realm world. RMM will
	 * save/restore the required registers that are shared with SVE/FPU so
	 * that the Realm can use FPU or SVE.
	 */
	if (is_feat_sme_supported()) {
		sme_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}

	/*
	 * If FEAT_MPAM is supported and enabled, then disable trapping access
	 * to the MPAM registers for the Realm world. Instead, RMM will
	 * configure the access to be trapped by itself so it can inject
	 * undefined aborts back to the Realm.
	 */
	if (is_feat_mpam_supported()) {
		mpam_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}
}

/*******************************************************************************
 * Jump to the RMM for the first time.
 ******************************************************************************/
static int32_t rmm_init(void)
{
	long rc;
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	INFO("RMM init start.\n");

	/* Enable architecture extensions */
	manage_extensions_realm(&ctx->cpu_ctx);

	manage_extensions_realm_per_world();

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed: %ld\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return 0;
	}

	INFO("RMM init end.\n");

	return 1;
}

/*******************************************************************************
 * Load and read the RMM manifest, then set up the RMM.
 ******************************************************************************/
int rmmd_setup(void)
{
	size_t shared_buf_size __unused;
	uintptr_t shared_buf_base;
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
	struct rmm_manifest *manifest;
	int rc;

	/* Make sure RME is supported. */
	if (is_feat_rme_present() == 0U) {
		/* Mark the RMM boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOTSUP;
	}

	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
	if ((rmm_ep_info == NULL) || (rmm_ep_info->pc == 0)) {
		WARN("No RMM image provided by BL2 boot loader. Booting "
		     "device without RMM initialization. SMCs destined for "
		     "RMM will return SMC_UNK\n");

		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOENT;
	}

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = EP_REALM;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
				    MODE_SP_ELX,
				    DISABLE_ALL_EXCEPTIONS);

	shared_buf_size =
			plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);

	assert((shared_buf_size == SZ_4K) &&
	       ((void *)shared_buf_base != NULL));

	/* Zero out and load the boot manifest at the beginning of the shared area */
	manifest = (struct rmm_manifest *)shared_buf_base;
	(void)memset((void *)manifest, 0, sizeof(struct rmm_manifest));

	rc = plat_rmmd_load_manifest(manifest);
	if (rc != 0) {
		ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return rc;
	}
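	/* Flush the shared area so the loaded manifest is visible to the RMM. */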
	flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);

	/*
	 * Prepare coldboot arguments for RMM:
	 * arg0: This CPUID (primary processor).
	 * arg1: Version for this Boot Interface.
	 * arg2: PLATFORM_CORE_COUNT.
	 * arg3: Base address for the EL3 <-> RMM shared area. The boot
	 *       manifest will be stored at the beginning of this area.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
	rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
	rmm_ep_info->args.arg3 = shared_buf_base;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

	INFO("RMM setup done.\n");

	/* Register init function for deferred init. */
	bl31_register_rmm_init(&rmm_init);

	return 0;
}

/*******************************************************************************
 * Forward an SMC to the other security state.
 ******************************************************************************/
static uint64_t rmmd_smc_forward(uint32_t src_sec_state,
				 uint32_t dst_sec_state, uint64_t x0,
				 uint64_t x1, uint64_t x2, uint64_t x3,
				 uint64_t x4, void *handle)
{
	cpu_context_t *ctx = cm_get_context(dst_sec_state);

	/* Save incoming security state */
	cm_el2_sysregs_context_save(src_sec_state);

	/* Restore outgoing security state */
	cm_el2_sysregs_context_restore(dst_sec_state);
	cm_set_next_eret_context(dst_sec_state);

	/*
	 * As per SMCCC v1.2, x4 to x7 must be preserved unless they are used
	 * as return arguments, hence the onward and return paths are handled
	 * differently: up to 8 arguments are supported on the onward path and
	 * 4 on the return path. Register x4 will be preserved by the RMM if
	 * it is not used on the return path.
	 */
	if (src_sec_state == NON_SECURE) {
		SMC_RET8(ctx, x0, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
	}

	SMC_RET5(ctx, x0, x1, x2, x3, x4);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI. Each call is
 * either forwarded to the other security state or handled by the RMM
 * dispatcher.
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie,
			  void *handle, uint64_t flags)
{
	uint32_t src_sec_state;

	/* If RMM failed to boot, treat any RMI SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	/* RMI must not be invoked by the Secure world */
	if (src_sec_state == SMC_FROM_SECURE) {
		WARN("RMMD: RMI invoked by secure world.\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Forward an RMI call from the Normal world to the Realm world as
	 * it is.
	 */
	if (src_sec_state == SMC_FROM_NON_SECURE) {
		/*
		 * If the SVE hint bit is set in the flags then update the SMC
		 * function ID and pass it on to the lower EL.
		 */
		if (is_sve_hint_set(flags)) {
			smc_fid |= (FUNCID_SVE_HINT_MASK <<
				    FUNCID_SVE_HINT_SHIFT);
		}
		VERBOSE("RMMD: RMI call from non-secure world.\n");
		return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
					x1, x2, x3, x4, handle);
	}

	if (src_sec_state != SMC_FROM_REALM) {
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_RMI_REQ_COMPLETE: {
		uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);

		return rmmd_smc_forward(REALM, NON_SECURE, x1,
					x2, x3, x4, x5, handle);
	}
	default:
		WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/*******************************************************************************
 * This CPU has been turned on. Enter the RMM to initialise R-EL2. Entry into
 * the RMM is done after initialising the minimal architectural state that
 * guarantees safe execution.
 ******************************************************************************/
static void *rmmd_cpu_on_finish_handler(const void *arg)
{
	long rc;
	uint32_t linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *ctx = &rmm_context[linear_id];

	if (rmm_boot_failed) {
		/* RMM boot failed on a previous CPU. Abort. */
		ERROR("RMM failed to initialize. Ignoring for CPU%d\n",
		      linear_id);
		return NULL;
	}

	/*
	 * Prepare warmboot arguments for RMM:
	 * arg0: This CPUID.
	 * arg1 to arg3: Not used.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = 0ULL;
	rmm_ep_info->args.arg2 = 0ULL;
	rmm_ep_info->args.arg3 = 0ULL;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);

	/* Enable architecture extensions */
	manage_extensions_realm(&ctx->cpu_ctx);

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

	rc = rmmd_rmm_sync_entry(ctx);

	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed on CPU%d: %ld\n", linear_id, rc);
		/* Mark the boot as failed for any other booting CPU */
		rmm_boot_failed = true;
	}

	return NULL;
}

/* Subscribe to the PSCI CPU_ON event to initialise the RMM on secondary CPUs */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);

/* Convert a GPT library error to an RMMD GTS error */
static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
{
	int ret;

	if (error == 0) {
		return E_RMM_OK;
	}

	if (error == -EINVAL) {
		ret = E_RMM_BAD_ADDR;
	} else {
		/* This is the only other error code we expect */
		assert(error == -EPERM);
		ret = E_RMM_BAD_PAS;
	}

	ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
	      error, address, smc_fid);
	return ret;
}

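/*
 * Return the requested RMM-EL3 interface feature register. Only feature
 * register 0 (RMM_EL3_FEAT_REG_0_IDX) is currently supported; it advertises
 * whether EL3 token signing is enabled.
 */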
static int rmm_el3_ifc_get_feat_register(uint64_t feat_reg_idx,
					 uint64_t *feat_reg)
{
	if (feat_reg_idx != RMM_EL3_FEAT_REG_0_IDX) {
		ERROR("RMMD: Failed to get feature register %ld\n", feat_reg_idx);
		return E_RMM_INVAL;
	}

	*feat_reg = 0UL;
#if RMMD_ENABLE_EL3_TOKEN_SIGN
	*feat_reg |= RMM_EL3_FEAT_REG_0_EL3_TOKEN_SIGN_MASK;
#endif
	return E_RMM_OK;
}

/*
 * Update the encryption key associated with @mecid.
 */
static int rmmd_mecid_key_update(uint64_t mecid)
{
	uint64_t mecid_width, mecid_width_mask;
	int ret;

	/*
	 * Check whether FEAT_MEC is supported by the hardware. If not, return
	 * unknown SMC.
	 */
	if (is_feat_mec_supported() == false) {
		return E_RMM_UNK;
	}

	/*
	 * Check that the mecid parameter fits within
	 * MECIDR_EL2.MECIDWidthm1 + 1 bits.
	 */
	mecid_width = ((read_mecidr_el2() >> MECIDR_EL2_MECIDWidthm1_SHIFT) &
		       MECIDR_EL2_MECIDWidthm1_MASK) + 1;
	mecid_width_mask = ((1 << mecid_width) - 1);
	if ((mecid & ~mecid_width_mask) != 0U) {
		return E_RMM_INVAL;
	}

	ret = plat_rmmd_mecid_key_update(mecid);

	if (ret != 0) {
		return E_RMM_UNK;
	}
	return E_RMM_OK;
}

/*******************************************************************************
 * This function handles RMM-EL3 interface SMCs.
 ******************************************************************************/
uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			      uint64_t x3, uint64_t x4, void *cookie,
			      void *handle, uint64_t flags)
{
	uint64_t remaining_len = 0UL;
	uint32_t src_sec_state;
	int ret;

	/* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	if (src_sec_state != SMC_FROM_REALM) {
		WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_GTSI_DELEGATE:
		ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_GTSI_UNDELEGATE:
		ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_ATTEST_GET_REALM_KEY:
		ret = rmmd_attest_get_signing_key(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);
	case RMM_ATTEST_GET_PLAT_TOKEN:
		ret = rmmd_attest_get_platform_token(x1, &x2, x3, &remaining_len);
		SMC_RET3(handle, ret, x2, remaining_len);
	case RMM_EL3_FEATURES:
		ret = rmm_el3_ifc_get_feat_register(x1, &x2);
		SMC_RET2(handle, ret, x2);
#if RMMD_ENABLE_EL3_TOKEN_SIGN
	case RMM_EL3_TOKEN_SIGN:
		return rmmd_el3_token_sign(handle, x1, x2, x3, x4);
#endif
	case RMM_BOOT_COMPLETE:
		VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
		/* rmmd_rmm_sync_exit() does not return. */
		rmmd_rmm_sync_exit(x1);

	case RMM_MECID_KEY_UPDATE:
		ret = rmmd_mecid_key_update(x1);
		SMC_RET1(handle, ret);
	default:
		WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}