Merge "feat(git-hooks): add pre-commit hook" into integration
diff --git a/Makefile b/Makefile
index f4d623e..e2922a2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -153,6 +153,9 @@
ENABLE_FEAT_ECV = 1
ENABLE_FEAT_FGT = 1
+# RME enables the CSV2_2 extension by default.
+ENABLE_FEAT_CSV2_2 = 1
+
endif
# USE_SPINLOCK_CAS requires AArch64 build
diff --git a/docs/components/fconf/fconf_properties.rst b/docs/components/fconf/fconf_properties.rst
index 20cc758..3479576 100644
--- a/docs/components/fconf/fconf_properties.rst
+++ b/docs/components/fconf/fconf_properties.rst
@@ -20,7 +20,9 @@
- load-address [mandatory]
- value type: <u64>
- - Physical loading base address of the configuration.
+ - Physical loading base address of the configuration.
+ If secondary-load-address is also provided (see below), then this is the
+ primary load address.
- max-size [mandatory]
- value type: <u32>
@@ -30,10 +32,11 @@
- value type: <u32>
- Image ID of the configuration.
-- ns-load-address [optional]
+- secondary-load-address [optional]
- value type: <u64>
- - Physical loading base address of the configuration in the non-secure
- memory.
- Only needed by those configuration files which require being loaded
- in secure memory (at load-address) as well as in non-secure memory
- e.g. HW_CONFIG
+ - A platform uses this physical address to copy the configuration to
+ another location during the boot-flow.
+
+--------------
+
+*Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.*
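
As a quick illustration of how the renamed property reaches platform code, the sketch below reads the parsed HW_CONFIG entry through FCONF and returns its secondary address. This is a minimal sketch only: it relies on the dyn_cfg getter interfaces touched later in this patch, and the helper name itself is hypothetical.

#include <assert.h>
#include <stdint.h>

#include <common/tbbr/tbbr_img_def.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>

/* Hypothetical helper: return the copy-destination address of HW_CONFIG as
 * populated by fconf_populate_dtb_registry(). */
static uintptr_t hw_config_secondary_addr(void)
{
	const struct dyn_cfg_dtb_info_t *info;

	info = FCONF_GET_PROPERTY(dyn_cfg, dtb, HW_CONFIG_ID);
	assert(info != NULL);

	/* secondary_config_addr is left as ~0UL when the optional
	 * "secondary-load-address" property is absent from the DT node. */
	return info->secondary_config_addr;
}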
diff --git a/docs/threat_model/threat_model.rst b/docs/threat_model/threat_model.rst
index 0e967ba..940cad5 100644
--- a/docs/threat_model/threat_model.rst
+++ b/docs/threat_model/threat_model.rst
@@ -921,16 +921,16 @@
+------------------------+-----------------------------------------------------+
| ID | 14 |
+========================+=====================================================+
-| Threat | | **Security vulnerabilities in the Non-secure OS |
-| | can lead to secure world compromise if the option |
-| | OPTEE_ALLOW_SMC_LOAD is enabled.** |
+| Threat | | **Attacker wants to execute an arbitrary or |
+| | untrusted binary as the secure OS.** |
| | |
-| | | This option trusts the non-secure world up until |
-| | the point it issues the SMC call to load the |
-| | Secure BL32 payload. If a compromise occurs |
-| | before the SMC call is invoked, then arbitrary |
-| | code execution in S-EL1 can occur or arbitrary |
-| | memory in EL3 can be overwritten. |
+| | | When the option OPTEE_ALLOW_SMC_LOAD is enabled, |
+| | this trusts the non-secure world up until the |
+| | point it issues the SMC call to load the Secure |
+| | BL32 payload. If a compromise occurs before the |
+| | SMC call is invoked, then arbitrary code execution|
+| | in S-EL1 can occur or arbitrary memory in EL3 can |
+| | be overwritten. |
+------------------------+-----------------------------------------------------+
| Diagram Elements | DF5 |
+------------------------+-----------------------------------------------------+
@@ -948,9 +948,9 @@
+------------------------+-----------------+-----------------+-----------------+
| Impact | Critical (5) | Critical (5) | Critical (5) |
+------------------------+-----------------+-----------------+-----------------+
-| Likelihood | Low (2) | Low (2) | Low (2) |
+| Likelihood | High (4) | High (4) | High (4) |
+------------------------+-----------------+-----------------+-----------------+
-| Total Risk Rating | Medium (10) | Medium (10) | Medium (10) |
+| Total Risk Rating | Critical (20) | Critical (20) | Critical (20) |
+------------------------+-----------------+-----------------+-----------------+
| Mitigations | When enabling the option OPTEE_ALLOW_SMC_LOAD, |
| | the non-secure OS must be considered a closed |
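
For reference, the re-scored totals follow directly from the threat model's impact-times-likelihood scoring (assumed from the visible rows rather than restated here):

    Total Risk Rating = Impact x Likelihood = 5 x 4 = 20  (Critical)
    previous rating   =                       5 x 2 = 10  (Medium)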
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index 9e13c3d..9e4a3b7 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -1063,13 +1063,17 @@
#define PMBLIMITR_EL1 S3_0_C9_C10_0
/*******************************************************************************
- * Definitions for system register interface to MPAM
+ * Definitions for system register interface, shifts and masks for MPAM
******************************************************************************/
#define MPAMIDR_EL1 S3_0_C10_C4_4
#define MPAM2_EL2 S3_4_C10_C5_0
#define MPAMHCR_EL2 S3_4_C10_C4_0
#define MPAM3_EL3 S3_6_C10_C5_0
+#define MPAMIDR_EL1_HAS_HCR_SHIFT ULL(0x11)
+#define MPAMIDR_EL1_VPMR_MAX_SHIFT ULL(0x12)
+#define MPAMIDR_EL1_VPMR_MAX_WIDTH ULL(0x3)
+#define MPAMIDR_EL1_VPMR_MAX_POSSIBLE ULL(0x7)
/*******************************************************************************
* Definitions for system register interface to AMU for FEAT_AMUv1
******************************************************************************/
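
A minimal C sketch of how the new MPAMIDR_EL1 field definitions are typically consumed, assuming the <arch.h> definitions above are visible; the helper names are hypothetical:

#include <stdbool.h>
#include <stdint.h>

#include <arch.h>

/* HAS_HCR is the single bit at position 0x11 (17); VPMR_MAX is the 3-bit
 * field starting at bit 0x12 (18), so its largest encodable value is 0x7. */
static bool mpam_has_hcr(uint64_t mpamidr)
{
	return ((mpamidr >> MPAMIDR_EL1_HAS_HCR_SHIFT) & 1ULL) != 0ULL;
}

static uint64_t mpam_vpmr_max(uint64_t mpamidr)
{
	return (mpamidr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) &
	       ((1ULL << MPAMIDR_EL1_VPMR_MAX_WIDTH) - 1ULL);
}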
diff --git a/include/lib/fconf/fconf_dyn_cfg_getter.h b/include/lib/fconf/fconf_dyn_cfg_getter.h
index 43f298e..3554673 100644
--- a/include/lib/fconf/fconf_dyn_cfg_getter.h
+++ b/include/lib/fconf/fconf_dyn_cfg_getter.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -19,12 +19,11 @@
uint32_t config_max_size;
unsigned int config_id;
/*
- * Load address in non-secure memory. Only needed by those
- * configuration files which require being loaded in secure
- * memory (at config_addr) as well as in non-secure memory
+ * A platform uses this address to copy the configuration
+ * to another location during the boot-flow.
* - e.g. HW_CONFIG
*/
- uintptr_t ns_config_addr;
+ uintptr_t secondary_config_addr;
};
unsigned int dyn_cfg_dtb_info_get_index(unsigned int config_id);
@@ -32,7 +31,7 @@
int fconf_populate_dtb_registry(uintptr_t config);
/* Set config information in global DTB array */
-void set_config_info(uintptr_t config_addr, uintptr_t ns_config_addr,
+void set_config_info(uintptr_t config_addr, uintptr_t secondary_config_addr,
uint32_t config_max_size,
unsigned int config_id);
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index b5d61ff..722b8ae 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -257,52 +257,200 @@
mrs x10, MPAM2_EL2
str x10, [x0, #CTX_MPAM2_EL2]
+ mrs x10, MPAMIDR_EL1
+
+ /*
+ * The context registers that we intend to save would be part of the
+ * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
+ */
+ tbz w10, #MPAMIDR_EL1_HAS_HCR_SHIFT, 3f
+
+ /*
+ * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 would be present in the
+ * system register frame if MPAMIDR_EL1.HAS_HCR == 1. Proceed to save
+ * the context of these registers.
+ */
mrs x11, MPAMHCR_EL2
mrs x12, MPAMVPM0_EL2
stp x11, x12, [x0, #CTX_MPAMHCR_EL2]
- mrs x13, MPAMVPM1_EL2
- mrs x14, MPAMVPM2_EL2
- stp x13, x14, [x0, #CTX_MPAMVPM1_EL2]
+ mrs x13, MPAMVPMV_EL2
+ str x13, [x0, #CTX_MPAMVPMV_EL2]
- mrs x15, MPAMVPM3_EL2
- mrs x16, MPAMVPM4_EL2
- stp x15, x16, [x0, #CTX_MPAMVPM3_EL2]
+ /*
+ * MPAMIDR_EL1.VPMR_MAX has to be probed to obtain the maximum supported
+ * VPMR value. Proceed to save the context of registers from
+ * MPAMVPM1_EL2 to MPAMVPM<x>_EL2 where x is VPMR_MAX. From the MPAM spec,
+ * VPMR_MAX should not be zero if HAS_HCR == 1.
+ */
+ ubfx x10, x10, #MPAMIDR_EL1_VPMR_MAX_SHIFT, \
+ #MPAMIDR_EL1_VPMR_MAX_WIDTH
- mrs x9, MPAMVPM5_EL2
- mrs x10, MPAMVPM6_EL2
- stp x9, x10, [x0, #CTX_MPAMVPM5_EL2]
+ /*
+ * Once VPMR_MAX has been identified, calculate the offset relative to
+ * PC to jump to so that relevant context can be saved. The offset is
+ * calculated as (VPMR_POSSIBLE_MAX - VPMR_MAX) * (instruction size for
+ * saving one VPM register) + (absolute address of label "1").
+ */
+ mov w11, #MPAMIDR_EL1_VPMR_MAX_POSSIBLE
+ sub w10, w11, w10
- mrs x11, MPAMVPM7_EL2
- mrs x12, MPAMVPMV_EL2
- stp x11, x12, [x0, #CTX_MPAMVPM7_EL2]
- ret
+ /* Calculate the size of one block of MPAMVPM*_EL2 save */
+ adr x11, 1f
+ adr x12, 2f
+ sub x12, x12, x11
+
+ madd x10, x10, x12, x11
+ br x10
+
+ /*
+ * The branch above would land properly on one of the blocks following
+ * label "1". Make sure that the order of save is retained.
+ */
+1:
+#if ENABLE_BTI
+ bti j
+#endif
+ mrs x10, MPAMVPM7_EL2
+ str x10, [x0, #CTX_MPAMVPM7_EL2]
+2:
+#if ENABLE_BTI
+ bti j
+#endif
+ mrs x11, MPAMVPM6_EL2
+ str x11, [x0, #CTX_MPAMVPM6_EL2]
+
+#if ENABLE_BTI
+ bti j
+#endif
+ mrs x12, MPAMVPM5_EL2
+ str x12, [x0, #CTX_MPAMVPM5_EL2]
+
+#if ENABLE_BTI
+ bti j
+#endif
+ mrs x13, MPAMVPM4_EL2
+ str x13, [x0, #CTX_MPAMVPM4_EL2]
+
+#if ENABLE_BTI
+ bti j
+#endif
+ mrs x14, MPAMVPM3_EL2
+ str x14, [x0, #CTX_MPAMVPM3_EL2]
+
+#if ENABLE_BTI
+ bti j
+#endif
+ mrs x15, MPAMVPM2_EL2
+ str x15, [x0, #CTX_MPAMVPM2_EL2]
+
+#if ENABLE_BTI
+ bti j
+#endif
+ mrs x16, MPAMVPM1_EL2
+ str x16, [x0, #CTX_MPAMVPM1_EL2]
+
+3: ret
endfunc el2_sysregs_context_save_mpam
func el2_sysregs_context_restore_mpam
ldr x10, [x0, #CTX_MPAM2_EL2]
msr MPAM2_EL2, x10
+ mrs x10, MPAMIDR_EL1
+ /*
+ * The context registers that we intend to restore would be part of the
+ * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
+ */
+ tbz w10, #MPAMIDR_EL1_HAS_HCR_SHIFT, 3f
+
+ /*
+ * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 would be present in the
+ * system register frame if MPAMIDR_EL1.HAS_HCR == 1. Proceed to restore
+ * the context of these registers
+ */
ldp x11, x12, [x0, #CTX_MPAMHCR_EL2]
msr MPAMHCR_EL2, x11
msr MPAMVPM0_EL2, x12
- ldp x13, x14, [x0, #CTX_MPAMVPM1_EL2]
- msr MPAMVPM1_EL2, x13
- msr MPAMVPM2_EL2, x14
+ ldr x13, [x0, #CTX_MPAMVPMV_EL2]
+ msr MPAMVPMV_EL2, x13
- ldp x15, x16, [x0, #CTX_MPAMVPM3_EL2]
- msr MPAMVPM3_EL2, x15
- msr MPAMVPM4_EL2, x16
+ /*
+ * MPAMIDR_EL1.VPMR_MAX has to be probed to obtain the maximum supported
+ * VPMR value. Proceed to restore the context of registers from
+ * MPAMVPM1_EL2 to MPAMVPM<x>_EL2 where x is VPMR_MAX. From the MPAM spec,
+ * VPMR_MAX should not be zero if HAS_HCR == 1.
+ */
+ ubfx x10, x10, #MPAMIDR_EL1_VPMR_MAX_SHIFT, \
+ #MPAMIDR_EL1_VPMR_MAX_WIDTH
- ldp x9, x10, [x0, #CTX_MPAMVPM5_EL2]
- msr MPAMVPM5_EL2, x9
- msr MPAMVPM6_EL2, x10
+ /*
+ * Once VPMR_MAX has been identified, calculate the offset relative to
+ * PC to jump to so that relevant context can be restored. The offset is
+ * calculated as (VPMR_POSSIBLE_MAX - VPMR_MAX) * (instruction size for
+ * restoring one VPM register) + (absolute address of label "1").
+ */
+ mov w11, #MPAMIDR_EL1_VPMR_MAX_POSSIBLE
+ sub w10, w11, w10
- ldp x11, x12, [x0, #CTX_MPAMVPM7_EL2]
- msr MPAMVPM7_EL2, x11
- msr MPAMVPMV_EL2, x12
- ret
+ /* Calculate the size of one block of MPAMVPM*_EL2 restore */
+ adr x11, 1f
+ adr x12, 2f
+ sub x12, x12, x11
+
+ madd x10, x10, x12, x11
+ br x10
+
+ /*
+ * The branch above would land properly on one of the blocks following
+ * label "1". Make sure that the order of restore is retained.
+ */
+1:
+
+#if ENABLE_BTI
+ bti j
+#endif
+ ldr x10, [x0, #CTX_MPAMVPM7_EL2]
+ msr MPAMVPM7_EL2, x10
+2:
+#if ENABLE_BTI
+ bti j
+#endif
+ ldr x11, [x0, #CTX_MPAMVPM6_EL2]
+ msr MPAMVPM6_EL2, x11
+
+#if ENABLE_BTI
+ bti j
+#endif
+ ldr x12, [x0, #CTX_MPAMVPM5_EL2]
+ msr MPAMVPM5_EL2, x12
+
+#if ENABLE_BTI
+ bti j
+#endif
+ ldr x13, [x0, #CTX_MPAMVPM4_EL2]
+ msr MPAMVPM4_EL2, x13
+
+#if ENABLE_BTI
+ bti j
+#endif
+ ldr x14, [x0, #CTX_MPAMVPM3_EL2]
+ msr MPAMVPM3_EL2, x14
+
+#if ENABLE_BTI
+ bti j
+#endif
+ ldr x15, [x0, #CTX_MPAMVPM2_EL2]
+ msr MPAMVPM2_EL2, x15
+
+#if ENABLE_BTI
+ bti j
+#endif
+ ldr x16, [x0, #CTX_MPAMVPM1_EL2]
+ msr MPAMVPM1_EL2, x16
+
+3: ret
endfunc el2_sysregs_context_restore_mpam
#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
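
For readability, here is a rough C-level equivalent of the VPM part of the save path above. It is only a sketch of the computed-branch scheme: read_mpamvpm_el2() and the ctx_vpm[] slots are hypothetical stand-ins for the individual mrs/str blocks and the CTX_MPAMVPM<n>_EL2 offsets, and MPAMHCR_EL2, MPAMVPM0_EL2 and MPAMVPMV_EL2 are omitted because they are saved unconditionally once HAS_HCR is set.

#include <stdint.h>

#include <arch.h>

uint64_t read_mpamvpm_el2(uint64_t n);	/* hypothetical indexed accessor */

static void save_mpam_vpm(uint64_t mpamidr, uint64_t *ctx_vpm)
{
	uint64_t vpmr_max;

	/* Without MPAMIDR_EL1.HAS_HCR there is nothing beyond MPAM2_EL2. */
	if (((mpamidr >> MPAMIDR_EL1_HAS_HCR_SHIFT) & 1ULL) == 0ULL) {
		return;
	}

	vpmr_max = (mpamidr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) &
		   ((1ULL << MPAMIDR_EL1_VPMR_MAX_WIDTH) - 1ULL);

	/* Branching into the descending block chain has the same effect as
	 * saving MPAMVPM<vpmr_max>_EL2 down to MPAMVPM1_EL2. */
	for (uint64_t n = vpmr_max; n >= 1ULL; n--) {
		ctx_vpm[n] = read_mpamvpm_el2(n);
	}
}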
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 3bcefdb..dab25d6 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -169,7 +169,12 @@
state = get_el3state_ctx(ctx);
scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
- scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT | SCR_EnSCXT_BIT;
+ scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT;
+
+#if ENABLE_FEAT_CSV2_2
+ /* Enable access to the SCXTNUM_ELx registers. */
+ scr_el3 |= SCR_EnSCXT_BIT;
+#endif
write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
@@ -222,6 +227,11 @@
scr_el3 |= SCR_TERR_BIT;
#endif
+#if ENABLE_FEAT_CSV2_2
+ /* Enable access to the SCXTNUM_ELx registers. */
+ scr_el3 |= SCR_EnSCXT_BIT;
+#endif
+
#ifdef IMAGE_BL31
/*
* SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
diff --git a/lib/fconf/fconf_dyn_cfg_getter.c b/lib/fconf/fconf_dyn_cfg_getter.c
index 351772e..13081b0 100644
--- a/lib/fconf/fconf_dyn_cfg_getter.c
+++ b/lib/fconf/fconf_dyn_cfg_getter.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -31,7 +31,7 @@
* This function is used to alloc memory for config information from
* global pool and set the configuration information.
*/
-void set_config_info(uintptr_t config_addr, uintptr_t ns_config_addr,
+void set_config_info(uintptr_t config_addr, uintptr_t secondary_config_addr,
uint32_t config_max_size,
unsigned int config_id)
{
@@ -39,7 +39,7 @@
dtb_info = pool_alloc(&dtb_info_pool);
dtb_info->config_addr = config_addr;
- dtb_info->ns_config_addr = ns_config_addr;
+ dtb_info->secondary_config_addr = secondary_config_addr;
dtb_info->config_max_size = config_max_size;
dtb_info->config_id = config_id;
}
@@ -106,7 +106,7 @@
fdt_for_each_subnode(child, dtb, node) {
uint32_t config_max_size, config_id;
uintptr_t config_addr;
- uintptr_t ns_config_addr = ~0UL;
+ uintptr_t secondary_config_addr = ~0UL;
uint64_t val64;
/* Read configuration dtb information */
@@ -134,14 +134,16 @@
VERBOSE("\tmax-size = 0x%x\n", config_max_size);
VERBOSE("\tconfig-id = %u\n", config_id);
- rc = fdt_read_uint64(dtb, child, "ns-load-address", &val64);
+ rc = fdt_read_uint64(dtb, child, "secondary-load-address",
+ &val64);
if (rc == 0) {
- ns_config_addr = (uintptr_t)val64;
- VERBOSE("\tns-load-address = %lx\n", ns_config_addr);
+ secondary_config_addr = (uintptr_t)val64;
+ VERBOSE("\tsecondary-load-address = %lx\n",
+ secondary_config_addr);
}
- set_config_info(config_addr, ns_config_addr, config_max_size,
- config_id);
+ set_config_info(config_addr, secondary_config_addr,
+ config_max_size, config_id);
}
if ((child < 0) && (child != -FDT_ERR_NOTFOUND)) {
diff --git a/plat/arm/board/fvp/fdts/fvp_fw_config.dts b/plat/arm/board/fvp/fdts/fvp_fw_config.dts
index 577ac74..4adf5d5 100644
--- a/plat/arm/board/fvp/fdts/fvp_fw_config.dts
+++ b/plat/arm/board/fvp/fdts/fvp_fw_config.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -22,7 +22,7 @@
load-address = <0x0 0x07f00000>;
max-size = <0x00100000>;
id = <HW_CONFIG_ID>;
- ns-load-address = <0x0 0x82000000>;
+ secondary-load-address = <0x0 0x82000000>;
};
/*
@@ -40,7 +40,11 @@
/* If required, SPD should enable loading of trusted OS fw config */
#if defined(SPD_tspd) || defined(SPD_spmd)
tos_fw-config {
+
load-address = <0x0 0x04001500>;
+#if ENABLE_RME
+ secondary-load-address = <0x0 0x7e00000>;
+#endif /* ENABLE_RME */
max-size = <0xB00>;
id = <TOS_FW_CONFIG_ID>;
};
diff --git a/plat/arm/board/fvp/fvp_bl2_setup.c b/plat/arm/board/fvp/fvp_bl2_setup.c
index 74e5d72..4c71d81 100644
--- a/plat/arm/board/fvp/fvp_bl2_setup.c
+++ b/plat/arm/board/fvp/fvp_bl2_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -82,7 +82,7 @@
assert(param_node != NULL);
/* Copy HW config from Secure address to NS address */
- memcpy((void *)hw_config_info->ns_config_addr,
+ memcpy((void *)hw_config_info->secondary_config_addr,
(void *)hw_config_info->config_addr,
(size_t)param_node->image_info.image_size);
@@ -91,14 +91,14 @@
* a possibility to use HW-config without cache and MMU enabled
* at BL33
*/
- flush_dcache_range(hw_config_info->ns_config_addr,
+ flush_dcache_range(hw_config_info->secondary_config_addr,
param_node->image_info.image_size);
param_node = get_bl_mem_params_node(BL33_IMAGE_ID);
assert(param_node != NULL);
/* Update BL33's ep info with NS HW config address */
- param_node->ep_info.args.arg1 = hw_config_info->ns_config_addr;
+ param_node->ep_info.args.arg1 = hw_config_info->secondary_config_addr;
#endif /* !BL2_AT_EL3 && !EL3_PAYLOAD_BASE */
return arm_bl_params;
diff --git a/plat/arm/board/fvp/fvp_bl31_setup.c b/plat/arm/board/fvp/fvp_bl31_setup.c
index dd90965..57865eb 100644
--- a/plat/arm/board/fvp/fvp_bl31_setup.c
+++ b/plat/arm/board/fvp/fvp_bl31_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -45,8 +45,8 @@
*/
hw_config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, HW_CONFIG_ID);
assert(hw_config_info != NULL);
- assert(hw_config_info->ns_config_addr != 0UL);
- arg2 = hw_config_info->ns_config_addr;
+ assert(hw_config_info->secondary_config_addr != 0UL);
+ arg2 = hw_config_info->secondary_config_addr;
#endif /* !RESET_TO_BL31 && !BL2_AT_EL3 */
arm_bl31_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3);
diff --git a/plat/common/plat_spmd_manifest.c b/plat/common/plat_spmd_manifest.c
index b1fc13c..5f7d142 100644
--- a/plat/common/plat_spmd_manifest.c
+++ b/plat/common/plat_spmd_manifest.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -150,7 +150,7 @@
rc = mmap_add_dynamic_region((unsigned long long)pm_base_align,
pm_base_align,
PAGE_SIZE,
- MT_RO_DATA);
+ MT_RO_DATA | EL3_PAS);
if (rc != 0) {
ERROR("Error while mapping SPM Core manifest (%d).\n", rc);
return rc;
diff --git a/services/spd/opteed/opteed_main.c b/services/spd/opteed/opteed_main.c
index ff2aee0..ff09e7e 100644
--- a/services/spd/opteed/opteed_main.c
+++ b/services/spd/opteed/opteed_main.c
@@ -168,7 +168,8 @@
* used. It also assumes that a valid non-secure context has been
* initialised by PSCI so it does not need to save and restore any
* non-secure state. This function performs a synchronous entry into
- * OPTEE. OPTEE passes control back to this routine through a SMC.
+ * OPTEE. OPTEE passes control back to this routine through an SMC. This returns
+ * a non-zero value on success and zero on failure.
******************************************************************************/
static int32_t
opteed_init_with_entry_point(entry_point_info_t *optee_entry_point)
@@ -232,6 +233,10 @@
mapped_data_va = mapped_data_pa;
data_map_size = page_align(data_size + (mapped_data_pa - data_pa), UP);
+ /*
+	 * We do not validate the passed-in address because we are still
+	 * trusting the non-secure world at this point.
+ */
rc = mmap_add_dynamic_region(mapped_data_pa, mapped_data_va,
data_map_size, MT_MEMORY | MT_RO | MT_NS);
if (rc != 0) {
@@ -290,7 +295,9 @@
0,
0,
&opteed_sp_context[linear_id]);
- rc = opteed_init_with_entry_point(&optee_ep_info);
+ if (opteed_init_with_entry_point(&optee_ep_info) == 0) {
+ rc = -EFAULT;
+ }
/* Restore non-secure state */
cm_el1_sysregs_context_restore(NON_SECURE);
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
index afd0f2e..dde1622 100644
--- a/services/std_svc/spmd/spmd_main.c
+++ b/services/std_svc/spmd/spmd_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -16,10 +16,14 @@
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
+#include <common/tbbr/tbbr_img_def.h>
#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/fconf/fconf.h>
+#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
@@ -245,6 +249,92 @@
SMC_RET0(&ctx->cpu_ctx);
}
+#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
+static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
+ unsigned int attr, uintptr_t *align_addr,
+ size_t *align_size)
+{
+ uintptr_t base_addr_align;
+ size_t mapped_size_align;
+ int rc;
+
+	/* Page-align the address and size if necessary */
+ base_addr_align = page_align(base_addr, DOWN);
+ mapped_size_align = page_align(size, UP);
+
+ if ((base_addr != base_addr_align) &&
+ (size == mapped_size_align)) {
+ mapped_size_align += PAGE_SIZE;
+ }
+
+ /*
+	 * Dynamically map the given region with its aligned base address
+	 * and size.
+ */
+ rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
+ base_addr_align,
+ mapped_size_align,
+ attr);
+ if (rc == 0) {
+ *align_addr = base_addr_align;
+ *align_size = mapped_size_align;
+ }
+
+ return rc;
+}
+
+static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
+ size_t size)
+{
+ uintptr_t root_base_addr_align, sec_base_addr_align;
+ size_t root_mapped_size_align, sec_mapped_size_align;
+ int rc;
+
+ assert(root_base_addr != 0UL);
+ assert(sec_base_addr != 0UL);
+ assert(size != 0UL);
+
+ /* Map the memory with required attributes */
+ rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
+ &root_base_addr_align,
+ &root_mapped_size_align);
+ if (rc != 0) {
+ ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
+ root_base_addr, rc);
+ panic();
+ }
+
+ rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
+ &sec_base_addr_align, &sec_mapped_size_align);
+ if (rc != 0) {
+ ERROR("%s %s %lu (%d)\n", "Error while mapping",
+ "secure region", sec_base_addr, rc);
+ panic();
+ }
+
+	/* Copy the contents from the root region to the secure region */
+ (void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);
+
+ /* Unmap root memory region */
+ rc = mmap_remove_dynamic_region(root_base_addr_align,
+ root_mapped_size_align);
+ if (rc != 0) {
+ ERROR("%s %s %lu (%d)\n", "Error while unmapping",
+ "root region", root_base_addr_align, rc);
+ panic();
+ }
+
+ /* Unmap secure memory region */
+ rc = mmap_remove_dynamic_region(sec_base_addr_align,
+ sec_mapped_size_align);
+ if (rc != 0) {
+ ERROR("%s %s %lu (%d)\n", "Error while unmapping",
+ "secure region", sec_base_addr_align, rc);
+ panic();
+ }
+}
+#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
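
A quick worked example of the page-alignment adjustment in spmd_dynamic_map_mem() above, using the FVP tos_fw-config values from this patch and assuming a 4 KiB PAGE_SIZE:

    base_addr = 0x04001500, size = 0xB00
      base_addr_align   = 0x04001000
      mapped_size_align = 0x1000            (0xB00 rounded up)
      -> covers 0x04001000-0x04002000, so no extra page is needed

    base_addr = 0x04001500, size = 0x1000   (already page-aligned)
      mapped_size_align = 0x1000 + PAGE_SIZE = 0x2000
      -> the extra page covers the tail beyond 0x04002000 that the
         rounded-down base would otherwise leave unmapped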
+
/*******************************************************************************
* Loads SPMC manifest and inits SPMC.
******************************************************************************/
@@ -254,6 +344,7 @@
unsigned int core_id;
uint32_t ep_attr, flags;
int rc;
+ const struct dyn_cfg_dtb_info_t *image_info __unused;
/* Load the SPM Core manifest */
rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
@@ -344,6 +435,26 @@
DISABLE_ALL_EXCEPTIONS);
}
+#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
+ image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
+ assert(image_info != NULL);
+
+ if ((image_info->config_addr == 0UL) ||
+ (image_info->secondary_config_addr == 0UL) ||
+ (image_info->config_max_size == 0UL)) {
+ return -EINVAL;
+ }
+
+ /* Copy manifest from root->secure region */
+ spmd_do_sec_cpy(image_info->config_addr,
+ image_info->secondary_config_addr,
+ image_info->config_max_size);
+
+ /* Update ep info of BL32 */
+ assert(spmc_ep_info != NULL);
+ spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
+#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
+
/* Set an initial SPMC context state for all cores. */
for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
spm_core_context[core_id].state = SPMC_STATE_OFF;
diff --git a/tools/cert_create/src/key.c b/tools/cert_create/src/key.c
index 487777b..27ec979 100644
--- a/tools/cert_create/src/key.c
+++ b/tools/cert_create/src/key.c
@@ -212,7 +212,7 @@
*err_code = KEY_ERR_OPEN;
}
} else {
- WARN("Key filename not specified\n");
+ VERBOSE("Key filename not specified\n");
*err_code = KEY_ERR_FILENAME;
}