Merge pull request #717 from sandrine-bailleux-arm/sb/foundation-fvp-v10
Whitelist version 9.6 of Foundation FVP
diff --git a/Makefile b/Makefile
index 49d7cc4..2b630b3 100644
--- a/Makefile
+++ b/Makefile
@@ -115,7 +115,8 @@
# Whether code and read-only data should be put on separate memory pages.
# The platform Makefile is free to override this value.
SEPARATE_CODE_AND_RODATA := 0
-
+# Flag to enable new version of image loading
+LOAD_IMAGE_V2 := 0
################################################################################
# Checkpatch script options
@@ -187,6 +188,10 @@
FWU_FIP_DEPS += fwu_certificates
endif
+# For AArch32, enable new version of image loading.
+ifeq (${ARCH},aarch32)
+ LOAD_IMAGE_V2 := 1
+endif
################################################################################
# Toolchain
@@ -355,6 +360,21 @@
endif
endif
+# TRUSTED_BOARD_BOOT is currently not supported when LOAD_IMAGE_V2 is enabled.
+ifeq (${LOAD_IMAGE_V2},1)
+ ifeq (${TRUSTED_BOARD_BOOT},1)
+ $(error "TRUSTED_BOARD_BOOT is currently not supported \
+ for LOAD_IMAGE_V2=1")
+ endif
+endif
+
+# For AArch32, LOAD_IMAGE_V2 must be enabled.
+ifeq (${ARCH},aarch32)
+ ifeq (${LOAD_IMAGE_V2}, 0)
+ $(error "For AArch32, LOAD_IMAGE_V2 must be enabled.")
+ endif
+endif
+
################################################################################
# Process platform overrideable behaviour
@@ -445,6 +465,7 @@
$(eval $(call assert_boolean,ENABLE_PMF))
$(eval $(call assert_boolean,ENABLE_PSCI_STAT))
$(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA))
+$(eval $(call assert_boolean,LOAD_IMAGE_V2))
################################################################################
@@ -475,6 +496,7 @@
$(eval $(call add_define,ENABLE_PMF))
$(eval $(call add_define,ENABLE_PSCI_STAT))
$(eval $(call add_define,SEPARATE_CODE_AND_RODATA))
+$(eval $(call add_define,LOAD_IMAGE_V2))
# Define the EL3_PAYLOAD_BASE flag only if it is provided.
ifdef EL3_PAYLOAD_BASE
$(eval $(call add_define,EL3_PAYLOAD_BASE))
@@ -495,8 +517,6 @@
################################################################################
# Include BL specific makefiles
################################################################################
-# BL31 is not needed and BL1, BL2 & BL2U are not currently supported in AArch32
-ifneq (${ARCH},aarch32)
ifdef BL1_SOURCES
NEED_BL1 := yes
include bl1/bl1.mk
@@ -507,6 +527,8 @@
include bl2/bl2.mk
endif
+# For AArch32, BL31 is not applicable, and BL2U is not supported at present.
+ifneq (${ARCH},aarch32)
ifdef BL2U_SOURCES
NEED_BL2U := yes
include bl2u/bl2u.mk
diff --git a/bl1/aarch32/bl1_arch_setup.c b/bl1/aarch32/bl1_arch_setup.c
new file mode 100644
index 0000000..6b906c7
--- /dev/null
+++ b/bl1/aarch32/bl1_arch_setup.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/*******************************************************************************
+ * TODO: Function that does the first bit of architectural setup.
+ ******************************************************************************/
+void bl1_arch_setup(void)
+{
+
+}
diff --git a/bl1/aarch32/bl1_context_mgmt.c b/bl1/aarch32/bl1_context_mgmt.c
new file mode 100644
index 0000000..a369097
--- /dev/null
+++ b/bl1/aarch32/bl1_context_mgmt.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform.h>
+#include <smcc_helpers.h>
+
+/*
+ * Following arrays will be used for context management.
+ * There are 2 instances, for the Secure and Non-Secure contexts.
+ */
+static cpu_context_t bl1_cpu_context[2];
+static smc_ctx_t bl1_smc_context[2];
+
+/* Following contains the next cpu context pointer. */
+static void *bl1_next_cpu_context_ptr;
+
+/* Following contains the next smc context pointer. */
+static void *bl1_next_smc_context_ptr;
+
+/* Following functions are used for SMC context handling */
+void *smc_get_ctx(int security_state)
+{
+ assert(sec_state_is_valid(security_state));
+ return &bl1_smc_context[security_state];
+}
+
+void smc_set_next_ctx(int security_state)
+{
+ assert(sec_state_is_valid(security_state));
+ bl1_next_smc_context_ptr = &bl1_smc_context[security_state];
+}
+
+void *smc_get_next_ctx(void)
+{
+ return bl1_next_smc_context_ptr;
+}
+
+/* Following functions are used for CPU context handling */
+void *cm_get_context(uint32_t security_state)
+{
+ assert(sec_state_is_valid(security_state));
+ return &bl1_cpu_context[security_state];
+}
+
+void cm_set_next_context(void *cpu_context)
+{
+ assert(cpu_context);
+ bl1_next_cpu_context_ptr = cpu_context;
+}
+
+void *cm_get_next_context(void)
+{
+ return bl1_next_cpu_context_ptr;
+}
+
+/*******************************************************************************
+ * Following function copies GP regs r0-r3, lr and spsr,
+ * from the CPU context to the SMC context structures.
+ ******************************************************************************/
+static void copy_cpu_ctx_to_smc_ctx(const regs_t *cpu_reg_ctx,
+ smc_ctx_t *next_smc_ctx)
+{
+ next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
+ next_smc_ctx->r1 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R1);
+ next_smc_ctx->r2 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R2);
+ next_smc_ctx->r3 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R3);
+ next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
+ next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
+}
+
+/*******************************************************************************
+ * Following function flushes the SMC & CPU context pointers and their data.
+ ******************************************************************************/
+static void flush_smc_and_cpu_ctx(void)
+{
+ flush_dcache_range((uintptr_t)&bl1_next_smc_context_ptr,
+ sizeof(bl1_next_smc_context_ptr));
+ flush_dcache_range((uintptr_t)bl1_next_smc_context_ptr,
+ sizeof(smc_ctx_t));
+
+ flush_dcache_range((uintptr_t)&bl1_next_cpu_context_ptr,
+ sizeof(bl1_next_cpu_context_ptr));
+ flush_dcache_range((uintptr_t)bl1_next_cpu_context_ptr,
+ sizeof(cpu_context_t));
+}
+
+/*******************************************************************************
+ * This function prepares the context for Secure/Normal world images.
+ * Normal world images are transitioned to HYP mode if supported, else SVC.
+ ******************************************************************************/
+void bl1_prepare_next_image(unsigned int image_id)
+{
+ unsigned int security_state;
+ image_desc_t *image_desc;
+ entry_point_info_t *next_bl_ep;
+
+ /* Get the image descriptor. */
+ image_desc = bl1_plat_get_image_desc(image_id);
+ assert(image_desc);
+
+ /* Get the entry point info. */
+ next_bl_ep = &image_desc->ep_info;
+
+ /* Get the image security state. */
+ security_state = GET_SECURITY_STATE(next_bl_ep->h.attr);
+
+ /* Prepare the SPSR for the next BL image. */
+ if (security_state == SECURE) {
+ next_bl_ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+ SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
+ } else {
+ /* Use HYP mode if supported else use SVC. */
+ if (GET_VIRT_EXT(read_id_pfr1())) {
+ next_bl_ep->spsr = SPSR_MODE32(MODE32_hyp, SPSR_T_ARM,
+ SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
+ } else {
+ next_bl_ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+ SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
+ }
+ }
+
+ /* Allow the platform to make changes to the entry point info. */
+ bl1_plat_set_ep_info(image_id, next_bl_ep);
+
+ /* Prepare the cpu context for the next BL image. */
+ cm_init_my_context(next_bl_ep);
+ cm_prepare_el3_exit(security_state);
+ cm_set_next_context(cm_get_context(security_state));
+
+ /* Prepare the smc context for the next BL image. */
+ smc_set_next_ctx(security_state);
+ copy_cpu_ctx_to_smc_ctx(get_regs_ctx(cm_get_next_context()),
+ smc_get_next_ctx());
+
+ /*
+ * Flush the SMC & CPU contexts and the (next) pointers, so that they
+ * can be accessed after the caches are disabled.
+ */
+ flush_smc_and_cpu_ctx();
+
+ /* Indicate that image is in execution state. */
+ image_desc->state = IMAGE_STATE_EXECUTED;
+
+ print_entry_point_info(next_bl_ep);
+}
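
For context, bl1_prepare_next_image() above is expected to be driven from BL1's
generic code (bl1_main.c, not modified by this file) just before control returns
to the entrypoint assembly. Below is a minimal, hypothetical sketch of that
hand-over; the wrapper function name is invented purely for illustration and the
declarations come from the existing BL1/common headers.

/*
 * Hypothetical sketch, not part of this patch: how BL1 generic code is
 * expected to use the context-management helpers above.
 */
static void bl1_handover_to_bl2_sketch(void)
{
	/* Fills bl1_cpu_context[]/bl1_smc_context[] and flushes them. */
	bl1_prepare_next_image(BL2_IMAGE_ID);

	/*
	 * Back in bl1_entrypoint.S, cm_get_next_context() and
	 * smc_get_next_ctx() return the contexts prepared here, which the
	 * assembly restores before the eret into the next image.
	 */
}
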
diff --git a/bl1/aarch32/bl1_entrypoint.S b/bl1/aarch32/bl1_entrypoint.S
new file mode 100644
index 0000000..b881786
--- /dev/null
+++ b/bl1/aarch32/bl1_entrypoint.S
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <context.h>
+#include <el3_common_macros.S>
+#include <smcc_helpers.h>
+#include <smcc_macros.S>
+
+ .globl bl1_vector_table
+ .globl bl1_entrypoint
+
+ /* -----------------------------------------------------
+ * Set up the vector table to support SVC & MON modes.
+ * -----------------------------------------------------
+ */
+vector_base bl1_vector_table
+ b bl1_entrypoint
+ b report_exception /* Undef */
+ b bl1_aarch32_smc_handler /* SMC call */
+ b report_exception /* Prefetch abort */
+ b report_exception /* Data abort */
+ b report_exception /* Reserved */
+ b report_exception /* IRQ */
+ b report_exception /* FIQ */
+
+ /* -----------------------------------------------------
+ * bl1_entrypoint() is the entry point into the trusted
+ * firmware code when a cpu is released from warm or
+ * cold reset.
+ * -----------------------------------------------------
+ */
+
+func bl1_entrypoint
+ /* ---------------------------------------------------------------------
+ * If the reset address is programmable then bl1_entrypoint() is
+ * executed only on the cold boot path. Therefore, we can skip the warm
+ * boot mailbox mechanism.
+ * ---------------------------------------------------------------------
+ */
+ el3_entrypoint_common \
+ _set_endian=1 \
+ _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
+ _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
+ _init_memory=1 \
+ _init_c_runtime=1 \
+ _exception_vectors=bl1_vector_table
+
+ /* -----------------------------------------------------
+ * Perform early platform setup & platform
+ * specific early arch. setup e.g. mmu setup
+ * -----------------------------------------------------
+ */
+ bl bl1_early_platform_setup
+ bl bl1_plat_arch_setup
+
+ /* -----------------------------------------------------
+ * Jump to main function.
+ * -----------------------------------------------------
+ */
+ bl bl1_main
+
+ /* -----------------------------------------------------
+ * Jump to next image.
+ * -----------------------------------------------------
+ */
+
+ /*
+ * The MMU needs to be disabled because both BL1 and BL2 execute
+ * in PL1, and therefore share the same address space.
+ * BL2 will initialize the address space according to its
+ * own requirements.
+ */
+ bl disable_mmu_icache_secure
+ stcopr r0, TLBIALL
+ dsb sy
+ isb
+
+ /* Get the cpu_context for next BL image */
+ bl cm_get_next_context
+
+ /* Restore the SCR */
+ ldr r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
+ stcopr r2, SCR
+ isb
+
+ /*
+ * Get the smc_context for next BL image,
+ * program the gp/system registers and exit
+ * secure monitor mode
+ */
+ bl smc_get_next_ctx
+ smcc_restore_gp_mode_regs
+ eret
+endfunc bl1_entrypoint
diff --git a/bl1/aarch32/bl1_exceptions.S b/bl1/aarch32/bl1_exceptions.S
new file mode 100644
index 0000000..e109e9f
--- /dev/null
+++ b/bl1/aarch32/bl1_exceptions.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl1.h>
+#include <bl_common.h>
+
+ .globl bl1_aarch32_smc_handler
+
+
+func bl1_aarch32_smc_handler
+ /* ------------------------------------------------
+ * The SMC is handled in BL1 assuming that BL2 has
+ * already turned off the MMU.
+ * ------------------------------------------------
+ */
+
+ /* ----------------------------------------------
+ * Only RUN_IMAGE SMC is supported.
+ * ----------------------------------------------
+ */
+ mov r8, #BL1_SMC_RUN_IMAGE
+ cmp r8, r0
+ blne report_exception
+
+ /* ------------------------------------------------
+ * Make sure only Secure world reaches here.
+ * ------------------------------------------------
+ */
+ ldcopr r8, SCR
+ tst r8, #SCR_NS_BIT
+ blne report_exception
+
+ /* ---------------------------------------------------------------------
+ * Pass control to next secure image.
+ * Here it expects r1 to contain the address of an entry_point_info_t
+ * structure describing the BL entrypoint.
+ * ---------------------------------------------------------------------
+ */
+ mov r8, r1
+ mov r0, r1
+ bl bl1_print_next_bl_ep_info
+
+#if SPIN_ON_BL1_EXIT
+ bl print_debug_loop_message
+debug_loop:
+ b debug_loop
+#endif
+
+ mov r0, r8
+ bl bl1_plat_prepare_exit
+
+ stcopr r0, TLBIALL
+ dsb sy
+ isb
+
+ /*
+ * Extract the PC and SPSR from the `entry_point_info_t` structure
+ * and load them into the LR and SPSR registers respectively.
+ */
+ ldr lr, [r8, #ENTRY_POINT_INFO_PC_OFFSET]
+ ldr r1, [r8, #(ENTRY_POINT_INFO_PC_OFFSET + 4)]
+ msr spsr, r1
+
+ add r8, r8, #ENTRY_POINT_INFO_ARGS_OFFSET
+ ldm r8, {r0, r1, r2, r3}
+ eret
+endfunc bl1_aarch32_smc_handler
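
To tie this handler to its caller: the RUN_IMAGE SMC is issued by BL2 with
r0 = BL1_SMC_RUN_IMAGE and r1 pointing to the entry point description (the
actual call appears in bl2_main.c later in this patch). A caller-side sketch
follows; the target image, mode and argument values are chosen purely for
illustration.

/*
 * Hypothetical caller-side sketch, not part of this patch. The field
 * names (pc, spsr, args) follow the entry_point_info_t uses elsewhere in
 * this patch; BL32_BASE stands in for the platform-defined next image.
 */
static void run_image_smc_sketch(entry_point_info_t *ep)
{
	ep->pc = BL32_BASE;
	ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
				DISABLE_ALL_EXCEPTIONS);
	ep->args.arg0 = 0;	/* args.arg0-arg3 become r0-r3 on entry */

	smc(BL1_SMC_RUN_IMAGE, (uintptr_t)ep, 0, 0, 0, 0, 0, 0);
}
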
diff --git a/bl1/bl1_context_mgmt.c b/bl1/aarch64/bl1_context_mgmt.c
similarity index 100%
rename from bl1/bl1_context_mgmt.c
rename to bl1/aarch64/bl1_context_mgmt.c
diff --git a/bl1/aarch64/bl1_exceptions.S b/bl1/aarch64/bl1_exceptions.S
index f080fe8..869261d 100644
--- a/bl1/aarch64/bl1_exceptions.S
+++ b/bl1/aarch64/bl1_exceptions.S
@@ -192,15 +192,15 @@
mov sp, x30
/* ---------------------------------------------------------------------
- * Pass EL3 control to BL31.
+ * Pass EL3 control to next BL image.
* Here it expects X1 with the address of a entry_point_info_t
- * structure describing the BL31 entrypoint.
+ * structure describing the next BL image entrypoint.
* ---------------------------------------------------------------------
*/
mov x20, x1
mov x0, x20
- bl bl1_print_bl31_ep_info
+ bl bl1_print_next_bl_ep_info
ldp x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET]
msr elr_el3, x0
diff --git a/bl1/bl1.mk b/bl1/bl1.mk
index 591e047..9ef5b40 100644
--- a/bl1/bl1.mk
+++ b/bl1/bl1.mk
@@ -29,15 +29,19 @@
#
BL1_SOURCES += bl1/bl1_main.c \
- bl1/aarch64/bl1_arch_setup.c \
- bl1/aarch64/bl1_entrypoint.S \
- bl1/aarch64/bl1_exceptions.S \
- bl1/bl1_context_mgmt.c \
- lib/cpus/aarch64/cpu_helpers.S \
- lib/el3_runtime/aarch64/context.S \
- lib/el3_runtime/aarch64/context_mgmt.c \
+ bl1/${ARCH}/bl1_arch_setup.c \
+ bl1/${ARCH}/bl1_context_mgmt.c \
+ bl1/${ARCH}/bl1_entrypoint.S \
+ bl1/${ARCH}/bl1_exceptions.S \
+ lib/cpus/${ARCH}/cpu_helpers.S \
+ lib/el3_runtime/${ARCH}/context_mgmt.c \
plat/common/plat_bl1_common.c
+
+ifeq (${ARCH},aarch64)
+BL1_SOURCES += lib/el3_runtime/aarch64/context.S
+endif
+
ifeq (${TRUSTED_BOARD_BOOT},1)
BL1_SOURCES += bl1/bl1_fwu.c
endif
diff --git a/bl1/bl1_main.c b/bl1/bl1_main.c
index cb1bc18..fbb75e0 100644
--- a/bl1/bl1_main.c
+++ b/bl1/bl1_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -64,11 +64,19 @@
void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
meminfo_t *bl2_mem_layout)
{
- const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE;
assert(bl1_mem_layout != NULL);
assert(bl2_mem_layout != NULL);
+#if LOAD_IMAGE_V2
+ /*
+ * Remove BL1 RW data from the scope of memory visible to BL2.
+ * This assumes that the BL1 RW data is at the top of bl1_mem_layout.
+ */
+ assert(BL1_RW_BASE > bl1_mem_layout->total_base);
+ bl2_mem_layout->total_base = bl1_mem_layout->total_base;
+ bl2_mem_layout->total_size = BL1_RW_BASE - bl1_mem_layout->total_base;
+#else
/* Check that BL1's memory is lying outside of the free memory */
assert((BL1_RAM_LIMIT <= bl1_mem_layout->free_base) ||
(BL1_RAM_BASE >= bl1_mem_layout->free_base +
@@ -79,7 +87,8 @@
reserve_mem(&bl2_mem_layout->total_base,
&bl2_mem_layout->total_size,
BL1_RAM_BASE,
- bl1_size);
+ BL1_RAM_LIMIT - BL1_RAM_BASE);
+#endif /* LOAD_IMAGE_V2 */
flush_dcache_range((unsigned long)bl2_mem_layout, sizeof(meminfo_t));
}
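
To make the LOAD_IMAGE_V2 branch above concrete, here is a small, standalone
illustration with made-up addresses; the real values come from the platform's
Trusted SRAM layout and BL1_RW_BASE.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Standalone illustration only; the type and the addresses are hypothetical. */
typedef struct {
	uintptr_t total_base;
	size_t total_size;
} meminfo_sketch_t;

static void bl2_mem_layout_sketch(void)
{
	const uintptr_t sram_base = 0x04001000;		/* hypothetical total_base */
	const uintptr_t bl1_rw_base = 0x04038000;	/* hypothetical BL1_RW_BASE */
	meminfo_sketch_t bl2_layout;

	/* BL1's RW data must sit at the top of the layout (hence the assert). */
	assert(bl1_rw_base > sram_base);

	/* BL2 is handed everything below BL1's RW data. */
	bl2_layout.total_base = sram_base;
	bl2_layout.total_size = bl1_rw_base - sram_base;	/* 0x37000 bytes */

	(void)bl2_layout;
}
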
@@ -98,15 +107,20 @@
NOTICE("BL1: %s\n", version_string);
NOTICE("BL1: %s\n", build_message);
- INFO("BL1: RAM 0x%lx - 0x%lx\n", BL1_RAM_BASE, BL1_RAM_LIMIT);
+ INFO("BL1: RAM %p - %p\n", (void *)BL1_RAM_BASE,
+ (void *)BL1_RAM_LIMIT);
#if DEBUG
- unsigned long val;
+ u_register_t val;
/*
* Ensure that MMU/Caches and coherency are turned on
*/
+#ifdef AARCH32
+ val = read_sctlr();
+#else
val = read_sctlr_el3();
+#endif
assert(val & SCTLR_M_BIT);
assert(val & SCTLR_C_BIT);
assert(val & SCTLR_I_BIT);
@@ -182,6 +196,9 @@
INFO("BL1: Loading BL2\n");
+#if LOAD_IMAGE_V2
+ err = load_auth_image(BL2_IMAGE_ID, image_info);
+#else
/* Load the BL2 image */
err = load_auth_image(bl1_tzram_layout,
BL2_IMAGE_ID,
@@ -189,6 +206,8 @@
image_info,
ep_info);
+#endif /* LOAD_IMAGE_V2 */
+
if (err) {
ERROR("Failed to load BL2 firmware.\n");
plat_error_handler(err);
@@ -201,24 +220,33 @@
* to BL2. BL2 will read the memory layout before using its
* memory for other purposes.
*/
+#if LOAD_IMAGE_V2
+ bl2_tzram_layout = (meminfo_t *) bl1_tzram_layout->total_base;
+#else
bl2_tzram_layout = (meminfo_t *) bl1_tzram_layout->free_base;
+#endif /* LOAD_IMAGE_V2 */
+
bl1_init_bl2_mem_layout(bl1_tzram_layout, bl2_tzram_layout);
- ep_info->args.arg1 = (unsigned long)bl2_tzram_layout;
+ ep_info->args.arg1 = (uintptr_t)bl2_tzram_layout;
NOTICE("BL1: Booting BL2\n");
- VERBOSE("BL1: BL2 memory layout address = 0x%llx\n",
- (unsigned long long) bl2_tzram_layout);
+ VERBOSE("BL1: BL2 memory layout address = %p\n",
+ (void *) bl2_tzram_layout);
}
/*******************************************************************************
- * Function called just before handing over to BL31 to inform the user about
- * the boot progress. In debug mode, also print details about the BL31 image's
- * execution context.
+ * Function called just before handing over to the next BL to inform the user
+ * about the boot progress. In debug mode, also print details about the BL
+ * image's execution context.
******************************************************************************/
-void bl1_print_bl31_ep_info(const entry_point_info_t *bl31_ep_info)
+void bl1_print_next_bl_ep_info(const entry_point_info_t *bl_ep_info)
{
+#ifdef AARCH32
+ NOTICE("BL1: Booting BL32\n");
+#else
NOTICE("BL1: Booting BL31\n");
- print_entry_point_info(bl31_ep_info);
+#endif /* AARCH32 */
+ print_entry_point_info(bl_ep_info);
}
#if SPIN_ON_BL1_EXIT
diff --git a/bl1/bl1_private.h b/bl1/bl1_private.h
index 79dde73..2ef8d0e 100644
--- a/bl1/bl1_private.h
+++ b/bl1/bl1_private.h
@@ -37,13 +37,13 @@
* Declarations of linker defined symbols which will tell us where BL1 lives
* in Trusted ROM and RAM
******************************************************************************/
-extern uint64_t __BL1_ROM_END__;
-#define BL1_ROM_END (uint64_t)(&__BL1_ROM_END__)
+extern uintptr_t __BL1_ROM_END__;
+#define BL1_ROM_END (uintptr_t)(&__BL1_ROM_END__)
-extern uint64_t __BL1_RAM_START__;
-extern uint64_t __BL1_RAM_END__;
-#define BL1_RAM_BASE (uint64_t)(&__BL1_RAM_START__)
-#define BL1_RAM_LIMIT (uint64_t)(&__BL1_RAM_END__)
+extern uintptr_t __BL1_RAM_START__;
+extern uintptr_t __BL1_RAM_END__;
+#define BL1_RAM_BASE (uintptr_t)(&__BL1_RAM_START__)
+#define BL1_RAM_LIMIT (uintptr_t)(&__BL1_RAM_END__)
/******************************************
* Function prototypes
diff --git a/bl2/aarch32/bl2_arch_setup.c b/bl2/aarch32/bl2_arch_setup.c
new file mode 100644
index 0000000..665c29c
--- /dev/null
+++ b/bl2/aarch32/bl2_arch_setup.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/*******************************************************************************
+ * Place holder function to perform any Secure SVC specific architectural
+ * setup. At the moment there is nothing to do.
+ ******************************************************************************/
+void bl2_arch_setup(void)
+{
+
+}
diff --git a/bl2/aarch32/bl2_entrypoint.S b/bl2/aarch32/bl2_entrypoint.S
new file mode 100644
index 0000000..6c620e2
--- /dev/null
+++ b/bl2/aarch32/bl2_entrypoint.S
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+
+
+ .globl bl2_vector_table
+ .globl bl2_entrypoint
+
+
+vector_base bl2_vector_table
+ b bl2_entrypoint
+ b report_exception /* Undef */
+ b report_exception /* SVC call */
+ b report_exception /* Prefetch abort */
+ b report_exception /* Data abort */
+ b report_exception /* Reserved */
+ b report_exception /* IRQ */
+ b report_exception /* FIQ */
+
+
+func bl2_entrypoint
+ /*---------------------------------------------
+ * Save the extents of the trusted RAM, passed in r1,
+ * that are available to BL2 for future use.
+ * r0 is not currently used.
+ * ---------------------------------------------
+ */
+ mov r11, r1
+
+ /* ---------------------------------------------
+ * Set the exception vector to something sane.
+ * ---------------------------------------------
+ */
+ ldr r0, =bl2_vector_table
+ stcopr r0, VBAR
+ isb
+
+ /* -----------------------------------------------------
+ * Enable the instruction cache
+ * -----------------------------------------------------
+ */
+ ldcopr r0, SCTLR
+ orr r0, r0, #SCTLR_I_BIT
+ stcopr r0, SCTLR
+ isb
+
+ /* ---------------------------------------------
+ * Since BL2 executes after BL1, it is assumed
+ * here that BL1 has already done the
+ * necessary register initializations.
+ * ---------------------------------------------
+ */
+
+ /* ---------------------------------------------
+ * Invalidate the RW memory used by the BL2
+ * image. This includes the data and NOBITS
+ * sections. This is done to safeguard against
+ * possible corruption of this memory by dirty
+ * cache lines in a system cache as a result of
+ * use by an earlier boot loader stage.
+ * ---------------------------------------------
+ */
+ ldr r0, =__RW_START__
+ ldr r1, =__RW_END__
+ sub r1, r1, r0
+ bl inv_dcache_range
+
+ /* ---------------------------------------------
+ * Zero out NOBITS sections. There are 2 of them:
+ * - the .bss section;
+ * - the coherent memory section.
+ * ---------------------------------------------
+ */
+ ldr r0, =__BSS_START__
+ ldr r1, =__BSS_SIZE__
+ bl zeromem
+
+#if USE_COHERENT_MEM
+ ldr r0, =__COHERENT_RAM_START__
+ ldr r1, =__COHERENT_RAM_UNALIGNED_SIZE__
+ bl zeromem
+#endif
+
+ /* --------------------------------------------
+ * Allocate a stack whose memory will be marked
+ * as Normal-IS-WBWA when the MMU is enabled.
+ * There is no risk of reading stale stack
+ * memory after enabling the MMU as only the
+ * primary cpu is running at the moment.
+ * --------------------------------------------
+ */
+ bl plat_set_my_stack
+
+ /* ---------------------------------------------
+ * Perform early platform setup & platform
+ * specific early arch. setup e.g. mmu setup
+ * ---------------------------------------------
+ */
+ mov r0, r11
+ bl bl2_early_platform_setup
+ bl bl2_plat_arch_setup
+
+ /* ---------------------------------------------
+ * Jump to main function.
+ * ---------------------------------------------
+ */
+ bl bl2_main
+
+ /* ---------------------------------------------
+ * Should never reach this point.
+ * ---------------------------------------------
+ */
+ bl plat_panic_handler
+
+endfunc bl2_entrypoint
diff --git a/bl2/bl2.mk b/bl2/bl2.mk
index d790738..f823ef4 100644
--- a/bl2/bl2.mk
+++ b/bl2/bl2.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -29,9 +29,18 @@
#
BL2_SOURCES += bl2/bl2_main.c \
- bl2/aarch64/bl2_entrypoint.S \
- bl2/aarch64/bl2_arch_setup.c \
- common/aarch64/early_exceptions.S \
- lib/locks/exclusive/aarch64/spinlock.S
+ bl2/${ARCH}/bl2_entrypoint.S \
+ bl2/${ARCH}/bl2_arch_setup.c \
+ lib/locks/exclusive/${ARCH}/spinlock.S
+
+ifeq (${ARCH},aarch64)
+BL2_SOURCES += common/aarch64/early_exceptions.S
+endif
+
+ifeq (${LOAD_IMAGE_V2},1)
+BL2_SOURCES += bl2/bl2_image_load_v2.c
+else
+BL2_SOURCES += bl2/bl2_image_load.c
+endif
BL2_LINKERFILE := bl2/bl2.ld.S
diff --git a/bl2/bl2_image_load.c b/bl2/bl2_image_load.c
new file mode 100644
index 0000000..ee0eb96
--- /dev/null
+++ b/bl2/bl2_image_load.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <auth_mod.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+/*
+ * Check for platforms that use obsolete image terminology
+ */
+#ifdef BL30_BASE
+# error "BL30_BASE platform define no longer used - please use SCP_BL2_BASE"
+#endif
+
+/*******************************************************************************
+ * Load the SCP_BL2 image if there's one.
+ * If a platform does not want to attempt to load SCP_BL2 image it must leave
+ * SCP_BL2_BASE undefined.
+ * Return 0 on success or if there's no SCP_BL2 image to load, a negative error
+ * code otherwise.
+ ******************************************************************************/
+static int load_scp_bl2(void)
+{
+ int e = 0;
+#ifdef SCP_BL2_BASE
+ meminfo_t scp_bl2_mem_info;
+ image_info_t scp_bl2_image_info;
+
+ /*
+ * It is up to the platform to specify where SCP_BL2 should be loaded if
+ * it exists. It could create space in the secure sram or point to a
+ * completely different memory.
+ *
+ * The entry point information is not relevant in this case as the AP
+ * won't execute the SCP_BL2 image.
+ */
+ INFO("BL2: Loading SCP_BL2\n");
+ bl2_plat_get_scp_bl2_meminfo(&scp_bl2_mem_info);
+ scp_bl2_image_info.h.version = VERSION_1;
+ e = load_auth_image(&scp_bl2_mem_info,
+ SCP_BL2_IMAGE_ID,
+ SCP_BL2_BASE,
+ &scp_bl2_image_info,
+ NULL);
+
+ if (e == 0) {
+ /* The subsequent handling of SCP_BL2 is platform specific */
+ e = bl2_plat_handle_scp_bl2(&scp_bl2_image_info);
+ if (e) {
+ ERROR("Failure in platform-specific handling of SCP_BL2 image.\n");
+ }
+ }
+#endif /* SCP_BL2_BASE */
+
+ return e;
+}
+
+#ifndef EL3_PAYLOAD_BASE
+/*******************************************************************************
+ * Load the BL31 image.
+ * The bl2_to_bl31_params and bl31_ep_info params will be updated with the
+ * relevant BL31 information.
+ * Return 0 on success, a negative error code otherwise.
+ ******************************************************************************/
+static int load_bl31(bl31_params_t *bl2_to_bl31_params,
+ entry_point_info_t *bl31_ep_info)
+{
+ meminfo_t *bl2_tzram_layout;
+ int e;
+
+ INFO("BL2: Loading BL31\n");
+ assert(bl2_to_bl31_params != NULL);
+ assert(bl31_ep_info != NULL);
+
+ /* Find out how much free trusted ram remains after BL2 load */
+ bl2_tzram_layout = bl2_plat_sec_mem_layout();
+
+ /* Set the X0 parameter to BL31 */
+ bl31_ep_info->args.arg0 = (unsigned long)bl2_to_bl31_params;
+
+ /* Load the BL31 image */
+ e = load_auth_image(bl2_tzram_layout,
+ BL31_IMAGE_ID,
+ BL31_BASE,
+ bl2_to_bl31_params->bl31_image_info,
+ bl31_ep_info);
+
+ if (e == 0) {
+ bl2_plat_set_bl31_ep_info(bl2_to_bl31_params->bl31_image_info,
+ bl31_ep_info);
+ }
+
+ return e;
+}
+
+/*******************************************************************************
+ * Load the BL32 image if there's one.
+ * The bl2_to_bl31_params param will be updated with the relevant BL32
+ * information.
+ * If a platform does not want to attempt to load BL32 image it must leave
+ * BL32_BASE undefined.
+ * Return 0 on success or if there's no BL32 image to load, a negative error
+ * code otherwise.
+ ******************************************************************************/
+static int load_bl32(bl31_params_t *bl2_to_bl31_params)
+{
+ int e = 0;
+#ifdef BL32_BASE
+ meminfo_t bl32_mem_info;
+
+ INFO("BL2: Loading BL32\n");
+ assert(bl2_to_bl31_params != NULL);
+
+ /*
+ * It is up to the platform to specify where BL32 should be loaded if
+ * it exists. It could create space in the secure sram or point to a
+ * completely different memory.
+ */
+ bl2_plat_get_bl32_meminfo(&bl32_mem_info);
+ e = load_auth_image(&bl32_mem_info,
+ BL32_IMAGE_ID,
+ BL32_BASE,
+ bl2_to_bl31_params->bl32_image_info,
+ bl2_to_bl31_params->bl32_ep_info);
+
+ if (e == 0) {
+ bl2_plat_set_bl32_ep_info(
+ bl2_to_bl31_params->bl32_image_info,
+ bl2_to_bl31_params->bl32_ep_info);
+ }
+#endif /* BL32_BASE */
+
+ return e;
+}
+
+#ifndef PRELOADED_BL33_BASE
+/*******************************************************************************
+ * Load the BL33 image.
+ * The bl2_to_bl31_params param will be updated with the relevant BL33
+ * information.
+ * Return 0 on success, a negative error code otherwise.
+ ******************************************************************************/
+static int load_bl33(bl31_params_t *bl2_to_bl31_params)
+{
+ meminfo_t bl33_mem_info;
+ int e;
+
+ INFO("BL2: Loading BL33\n");
+ assert(bl2_to_bl31_params != NULL);
+
+ bl2_plat_get_bl33_meminfo(&bl33_mem_info);
+
+ /* Load the BL33 image in non-secure memory provided by the platform */
+ e = load_auth_image(&bl33_mem_info,
+ BL33_IMAGE_ID,
+ plat_get_ns_image_entrypoint(),
+ bl2_to_bl31_params->bl33_image_info,
+ bl2_to_bl31_params->bl33_ep_info);
+
+ if (e == 0) {
+ bl2_plat_set_bl33_ep_info(bl2_to_bl31_params->bl33_image_info,
+ bl2_to_bl31_params->bl33_ep_info);
+ }
+
+ return e;
+}
+#endif /* PRELOADED_BL33_BASE */
+
+#endif /* EL3_PAYLOAD_BASE */
+
+/*******************************************************************************
+ * This function loads SCP_BL2/BL3x images and returns the ep_info for
+ * the next executable image.
+ ******************************************************************************/
+entry_point_info_t *bl2_load_images(void)
+{
+ bl31_params_t *bl2_to_bl31_params;
+ entry_point_info_t *bl31_ep_info;
+ int e;
+
+ e = load_scp_bl2();
+ if (e) {
+ ERROR("Failed to load SCP_BL2 (%i)\n", e);
+ plat_error_handler(e);
+ }
+
+ /* Perform platform setup in BL2 after loading SCP_BL2 */
+ bl2_platform_setup();
+
+ /*
+ * Get a pointer to the memory the platform has set aside to pass
+ * information to BL31.
+ */
+ bl2_to_bl31_params = bl2_plat_get_bl31_params();
+ bl31_ep_info = bl2_plat_get_bl31_ep_info();
+
+#ifdef EL3_PAYLOAD_BASE
+ /*
+ * In the case of an EL3 payload, we don't need to load any further
+ * images. Just update the BL31 entrypoint info structure to make BL1
+ * jump to the EL3 payload.
+ * The pointer to the memory the platform has set aside to pass
+ * information to BL31 in the normal boot flow is reused here, even
+ * though only a fraction of the information contained in the
+ * bl31_params_t structure makes sense in the context of EL3 payloads.
+ * This will be refined in the future.
+ */
+ INFO("BL2: Populating the entrypoint info for the EL3 payload\n");
+ bl31_ep_info->pc = EL3_PAYLOAD_BASE;
+ bl31_ep_info->args.arg0 = (unsigned long) bl2_to_bl31_params;
+ bl2_plat_set_bl31_ep_info(NULL, bl31_ep_info);
+#else
+ e = load_bl31(bl2_to_bl31_params, bl31_ep_info);
+ if (e) {
+ ERROR("Failed to load BL31 (%i)\n", e);
+ plat_error_handler(e);
+ }
+
+ e = load_bl32(bl2_to_bl31_params);
+ if (e) {
+ if (e == -EAUTH) {
+ ERROR("Failed to authenticate BL32\n");
+ plat_error_handler(e);
+ } else {
+ WARN("Failed to load BL32 (%i)\n", e);
+ }
+ }
+
+#ifdef PRELOADED_BL33_BASE
+ /*
+ * In this case, don't load the BL33 image as it's already loaded in
+ * memory. Update BL33 entrypoint information.
+ */
+ INFO("BL2: Populating the entrypoint info for the preloaded BL33\n");
+ bl2_to_bl31_params->bl33_ep_info->pc = PRELOADED_BL33_BASE;
+ bl2_plat_set_bl33_ep_info(NULL, bl2_to_bl31_params->bl33_ep_info);
+#else
+ e = load_bl33(bl2_to_bl31_params);
+ if (e) {
+ ERROR("Failed to load BL33 (%i)\n", e);
+ plat_error_handler(e);
+ }
+#endif /* PRELOADED_BL33_BASE */
+
+#endif /* EL3_PAYLOAD_BASE */
+
+ /* Flush the params to be passed to memory */
+ bl2_plat_flush_bl31_params();
+
+ return bl31_ep_info;
+}
diff --git a/bl2/bl2_image_load_v2.c b/bl2/bl2_image_load_v2.c
new file mode 100644
index 0000000..4fab655
--- /dev/null
+++ b/bl2/bl2_image_load_v2.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <auth_mod.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <desc_image_load.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+
+/*******************************************************************************
+ * This function loads SCP_BL2/BL3x images and returns the ep_info for
+ * the next executable image.
+ ******************************************************************************/
+entry_point_info_t *bl2_load_images(void)
+{
+ bl_params_t *bl2_to_next_bl_params;
+ bl_load_info_t *bl2_load_info;
+ const bl_load_info_node_t *bl2_node_info;
+ int plat_setup_done = 0;
+ int err;
+
+ /*
+ * Get information about the images to load.
+ */
+ bl2_load_info = plat_get_bl_image_load_info();
+ assert(bl2_load_info);
+ assert(bl2_load_info->head);
+ assert(bl2_load_info->h.type == PARAM_BL_LOAD_INFO);
+ assert(bl2_load_info->h.version >= VERSION_2);
+ bl2_node_info = bl2_load_info->head;
+
+ while (bl2_node_info) {
+ /*
+ * Perform platform setup before loading the image,
+ * if indicated in the image attributes AND if NOT
+ * already done before.
+ */
+ if (bl2_node_info->image_info->h.attr & IMAGE_ATTRIB_PLAT_SETUP) {
+ if (plat_setup_done) {
+ WARN("BL2: Platform setup already done!!\n");
+ } else {
+ INFO("BL2: Doing platform setup\n");
+ bl2_platform_setup();
+ plat_setup_done = 1;
+ }
+ }
+
+ if (!(bl2_node_info->image_info->h.attr & IMAGE_ATTRIB_SKIP_LOADING)) {
+ INFO("BL2: Loading image id %d\n", bl2_node_info->image_id);
+ err = load_auth_image(bl2_node_info->image_id,
+ bl2_node_info->image_info);
+ if (err) {
+ ERROR("BL2: Failed to load image (%i)\n", err);
+ plat_error_handler(err);
+ }
+ } else {
+ INFO("BL2: Skip loading image id %d\n", bl2_node_info->image_id);
+ }
+
+ /* Allow platform to handle image information. */
+ err = bl2_plat_handle_post_image_load(bl2_node_info->image_id);
+ if (err) {
+ ERROR("BL2: Failure in post image load handling (%i)\n", err);
+ plat_error_handler(err);
+ }
+
+ /* Go to next image */
+ bl2_node_info = bl2_node_info->next_load_info;
+ }
+
+ /*
+ * Get information to pass to the next image.
+ */
+ bl2_to_next_bl_params = plat_get_next_bl_params();
+ assert(bl2_to_next_bl_params);
+ assert(bl2_to_next_bl_params->head);
+ assert(bl2_to_next_bl_params->h.type == PARAM_BL_PARAMS);
+ assert(bl2_to_next_bl_params->h.version >= VERSION_2);
+
+ /* Flush the parameters to be passed to next image */
+ plat_flush_next_bl_params();
+
+ return bl2_to_next_bl_params->head->ep_info;
+}
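
For readers new to the descriptor-driven flow, a hypothetical sketch of the
platform side consumed by bl2_load_images() follows. Only the fields actually
read by the loop above are populated; real platforms are expected to build these
descriptors via the desc_image_load.h helpers, and the image IDs and attributes
used here are illustrative.

/* Hypothetical platform-side sketch, not part of this patch. */
static image_info_t bl31_img_info, bl33_img_info;

static bl_load_info_node_t bl33_load_node = {
	.image_id = BL33_IMAGE_ID,
	.image_info = &bl33_img_info,
	.next_load_info = NULL,
};

static bl_load_info_node_t bl31_load_node = {
	.image_id = BL31_IMAGE_ID,
	.image_info = &bl31_img_info,
	.next_load_info = &bl33_load_node,
};

static bl_load_info_t bl_load_info_sketch = {
	.h = { .type = PARAM_BL_LOAD_INFO, .version = VERSION_2 },
	.head = &bl31_load_node,
};

bl_load_info_t *plat_get_bl_image_load_info(void)
{
	/* Ask BL2 to run bl2_platform_setup() before loading BL31. */
	bl31_img_info.h.attr |= IMAGE_ATTRIB_PLAT_SETUP;

	return &bl_load_info_sketch;
}
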
diff --git a/bl2/bl2_main.c b/bl2/bl2_main.c
index c8fd683..514c005 100644
--- a/bl2/bl2_main.c
+++ b/bl2/bl2_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,192 +28,23 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <arch.h>
#include <arch_helpers.h>
-#include <assert.h>
#include <auth_mod.h>
#include <bl1.h>
#include <bl_common.h>
#include <debug.h>
-#include <errno.h>
#include <platform.h>
-#include <platform_def.h>
-#include <stdint.h>
#include "bl2_private.h"
-/*
- * Check for platforms that use obsolete image terminology
- */
-#ifdef BL30_BASE
-# error "BL30_BASE platform define no longer used - please use SCP_BL2_BASE"
-#endif
-
-/*******************************************************************************
- * Load the SCP_BL2 image if there's one.
- * If a platform does not want to attempt to load SCP_BL2 image it must leave
- * SCP_BL2_BASE undefined.
- * Return 0 on success or if there's no SCP_BL2 image to load, a negative error
- * code otherwise.
- ******************************************************************************/
-static int load_scp_bl2(void)
-{
- int e = 0;
-#ifdef SCP_BL2_BASE
- meminfo_t scp_bl2_mem_info;
- image_info_t scp_bl2_image_info;
-
- /*
- * It is up to the platform to specify where SCP_BL2 should be loaded if
- * it exists. It could create space in the secure sram or point to a
- * completely different memory.
- *
- * The entry point information is not relevant in this case as the AP
- * won't execute the SCP_BL2 image.
- */
- INFO("BL2: Loading SCP_BL2\n");
- bl2_plat_get_scp_bl2_meminfo(&scp_bl2_mem_info);
- scp_bl2_image_info.h.version = VERSION_1;
- e = load_auth_image(&scp_bl2_mem_info,
- SCP_BL2_IMAGE_ID,
- SCP_BL2_BASE,
- &scp_bl2_image_info,
- NULL);
-
- if (e == 0) {
- /* The subsequent handling of SCP_BL2 is platform specific */
- e = bl2_plat_handle_scp_bl2(&scp_bl2_image_info);
- if (e) {
- ERROR("Failure in platform-specific handling of SCP_BL2 image.\n");
- }
- }
-#endif /* SCP_BL2_BASE */
-
- return e;
-}
-
-#ifndef EL3_PAYLOAD_BASE
-/*******************************************************************************
- * Load the BL31 image.
- * The bl2_to_bl31_params and bl31_ep_info params will be updated with the
- * relevant BL31 information.
- * Return 0 on success, a negative error code otherwise.
- ******************************************************************************/
-static int load_bl31(bl31_params_t *bl2_to_bl31_params,
- entry_point_info_t *bl31_ep_info)
-{
- meminfo_t *bl2_tzram_layout;
- int e;
-
- INFO("BL2: Loading BL31\n");
- assert(bl2_to_bl31_params != NULL);
- assert(bl31_ep_info != NULL);
-
- /* Find out how much free trusted ram remains after BL2 load */
- bl2_tzram_layout = bl2_plat_sec_mem_layout();
-
- /* Set the X0 parameter to BL31 */
- bl31_ep_info->args.arg0 = (unsigned long)bl2_to_bl31_params;
-
- /* Load the BL31 image */
- e = load_auth_image(bl2_tzram_layout,
- BL31_IMAGE_ID,
- BL31_BASE,
- bl2_to_bl31_params->bl31_image_info,
- bl31_ep_info);
-
- if (e == 0) {
- bl2_plat_set_bl31_ep_info(bl2_to_bl31_params->bl31_image_info,
- bl31_ep_info);
- }
-
- return e;
-}
-
-/*******************************************************************************
- * Load the BL32 image if there's one.
- * The bl2_to_bl31_params param will be updated with the relevant BL32
- * information.
- * If a platform does not want to attempt to load BL32 image it must leave
- * BL32_BASE undefined.
- * Return 0 on success or if there's no BL32 image to load, a negative error
- * code otherwise.
- ******************************************************************************/
-static int load_bl32(bl31_params_t *bl2_to_bl31_params)
-{
- int e = 0;
-#ifdef BL32_BASE
- meminfo_t bl32_mem_info;
-
- INFO("BL2: Loading BL32\n");
- assert(bl2_to_bl31_params != NULL);
-
- /*
- * It is up to the platform to specify where BL32 should be loaded if
- * it exists. It could create space in the secure sram or point to a
- * completely different memory.
- */
- bl2_plat_get_bl32_meminfo(&bl32_mem_info);
- e = load_auth_image(&bl32_mem_info,
- BL32_IMAGE_ID,
- BL32_BASE,
- bl2_to_bl31_params->bl32_image_info,
- bl2_to_bl31_params->bl32_ep_info);
-
- if (e == 0) {
- bl2_plat_set_bl32_ep_info(
- bl2_to_bl31_params->bl32_image_info,
- bl2_to_bl31_params->bl32_ep_info);
- }
-#endif /* BL32_BASE */
-
- return e;
-}
-
-#ifndef PRELOADED_BL33_BASE
-/*******************************************************************************
- * Load the BL33 image.
- * The bl2_to_bl31_params param will be updated with the relevant BL33
- * information.
- * Return 0 on success, a negative error code otherwise.
- ******************************************************************************/
-static int load_bl33(bl31_params_t *bl2_to_bl31_params)
-{
- meminfo_t bl33_mem_info;
- int e;
-
- INFO("BL2: Loading BL33\n");
- assert(bl2_to_bl31_params != NULL);
-
- bl2_plat_get_bl33_meminfo(&bl33_mem_info);
-
- /* Load the BL33 image in non-secure memory provided by the platform */
- e = load_auth_image(&bl33_mem_info,
- BL33_IMAGE_ID,
- plat_get_ns_image_entrypoint(),
- bl2_to_bl31_params->bl33_image_info,
- bl2_to_bl31_params->bl33_ep_info);
-
- if (e == 0) {
- bl2_plat_set_bl33_ep_info(bl2_to_bl31_params->bl33_image_info,
- bl2_to_bl31_params->bl33_ep_info);
- }
-
- return e;
-}
-#endif /* PRELOADED_BL33_BASE */
-
-#endif /* EL3_PAYLOAD_BASE */
/*******************************************************************************
* The only thing to do in BL2 is to load further images and pass control to
- * BL31. The memory occupied by BL2 will be reclaimed by BL3x stages. BL2 runs
- * entirely in S-EL1.
+ * the next BL image. The memory occupied by BL2 will be reclaimed by BL3x stages. BL2
+ * runs entirely in S-EL1.
******************************************************************************/
void bl2_main(void)
{
- bl31_params_t *bl2_to_bl31_params;
- entry_point_info_t *bl31_ep_info;
- int e;
+ entry_point_info_t *next_bl_ep_info;
NOTICE("BL2: %s\n", version_string);
NOTICE("BL2: %s\n", build_message);
@@ -226,82 +57,22 @@
auth_mod_init();
#endif /* TRUSTED_BOARD_BOOT */
- /*
- * Load the subsequent bootloader images
- */
- e = load_scp_bl2();
- if (e) {
- ERROR("Failed to load SCP_BL2 (%i)\n", e);
- plat_error_handler(e);
- }
-
- /* Perform platform setup in BL2 after loading SCP_BL2 */
- bl2_platform_setup();
+ /* Load the subsequent bootloader images. */
+ next_bl_ep_info = bl2_load_images();
+#ifdef AARCH32
/*
- * Get a pointer to the memory the platform has set aside to pass
- * information to BL31.
+ * In the AArch32 state, BL1 and BL2 share the MMU setup.
+ * Given that BL2 does not map the BL1 regions, the MMU needs
+ * to be disabled in order to return to BL1.
*/
- bl2_to_bl31_params = bl2_plat_get_bl31_params();
- bl31_ep_info = bl2_plat_get_bl31_ep_info();
-
-#ifdef EL3_PAYLOAD_BASE
- /*
- * In the case of an EL3 payload, we don't need to load any further
- * images. Just update the BL31 entrypoint info structure to make BL1
- * jump to the EL3 payload.
- * The pointer to the memory the platform has set aside to pass
- * information to BL31 in the normal boot flow is reused here, even
- * though only a fraction of the information contained in the
- * bl31_params_t structure makes sense in the context of EL3 payloads.
- * This will be refined in the future.
- */
- INFO("BL2: Populating the entrypoint info for the EL3 payload\n");
- bl31_ep_info->pc = EL3_PAYLOAD_BASE;
- bl31_ep_info->args.arg0 = (unsigned long) bl2_to_bl31_params;
- bl2_plat_set_bl31_ep_info(NULL, bl31_ep_info);
-#else
- e = load_bl31(bl2_to_bl31_params, bl31_ep_info);
- if (e) {
- ERROR("Failed to load BL31 (%i)\n", e);
- plat_error_handler(e);
- }
-
- e = load_bl32(bl2_to_bl31_params);
- if (e) {
- if (e == -EAUTH) {
- ERROR("Failed to authenticate BL32\n");
- plat_error_handler(e);
- } else {
- WARN("Failed to load BL32 (%i)\n", e);
- }
- }
-
-#ifdef PRELOADED_BL33_BASE
- /*
- * In this case, don't load the BL33 image as it's already loaded in
- * memory. Update BL33 entrypoint information.
- */
- INFO("BL2: Populating the entrypoint info for the preloaded BL33\n");
- bl2_to_bl31_params->bl33_ep_info->pc = PRELOADED_BL33_BASE;
- bl2_plat_set_bl33_ep_info(NULL, bl2_to_bl31_params->bl33_ep_info);
-#else
- e = load_bl33(bl2_to_bl31_params);
- if (e) {
- ERROR("Failed to load BL33 (%i)\n", e);
- plat_error_handler(e);
- }
-#endif /* PRELOADED_BL33_BASE */
-
-#endif /* EL3_PAYLOAD_BASE */
-
- /* Flush the params to be passed to memory */
- bl2_plat_flush_bl31_params();
+ disable_mmu_icache_secure();
+#endif /* AARCH32 */
/*
- * Run BL31 via an SMC to BL1. Information on how to pass control to
- * the BL32 (if present) and BL33 software images will be passed to
- * BL31 as an argument.
+ * Run next BL image via an SMC to BL1. Information on how to pass
+ * control to the BL32 (if present) and BL33 software images will
+ * be passed to the next BL image as an argument.
*/
- smc(BL1_SMC_RUN_IMAGE, (unsigned long)bl31_ep_info, 0, 0, 0, 0, 0, 0);
+ smc(BL1_SMC_RUN_IMAGE, (unsigned long)next_bl_ep_info, 0, 0, 0, 0, 0, 0);
}
diff --git a/bl2/bl2_private.h b/bl2/bl2_private.h
index 022d1e9..b339777 100644
--- a/bl2/bl2_private.h
+++ b/bl2/bl2_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,8 +32,14 @@
#define __BL2_PRIVATE_H__
/******************************************
+ * Forward declarations
+ *****************************************/
+struct entry_point_info;
+
+/******************************************
* Function prototypes
*****************************************/
void bl2_arch_setup(void);
+struct entry_point_info *bl2_load_images(void);
#endif /* __BL2_PRIVATE_H__ */
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index f95ef41..fae5ee4 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -53,18 +53,27 @@
******************************************************************************/
static uint32_t next_image_type = NON_SECURE;
+/*
+ * Implement the ARM Standard Service function to get arguments for a
+ * particular service.
+ */
+uintptr_t get_arm_std_svc_args(unsigned int svc_mask)
+{
+ /* Setup the arguments for PSCI Library */
+ DEFINE_STATIC_PSCI_LIB_ARGS_V1(psci_args, bl31_warm_entrypoint);
+
+ /* PSCI is the only ARM Standard Service implemented */
+ assert(svc_mask == PSCI_FID_MASK);
+
+ return (uintptr_t)&psci_args;
+}
+
/*******************************************************************************
* Simple function to initialise all BL31 helper libraries.
******************************************************************************/
void bl31_lib_init(void)
{
cm_init();
-
- /*
- * Initialize the PSCI library here. This also does EL3 architectural
- * setup.
- */
- psci_setup((uintptr_t)bl31_warm_entrypoint);
}
/*******************************************************************************
@@ -86,7 +95,7 @@
/* Initialise helper libraries */
bl31_lib_init();
- /* Initialize the runtime services e.g. psci */
+ /* Initialize the runtime services e.g. psci. */
INFO("BL31: Initializing runtime services\n");
runtime_svc_init();
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
index 33d35b9..54f2ced 100644
--- a/bl32/sp_min/aarch32/entrypoint.S
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -32,6 +32,7 @@
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
+#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
@@ -41,7 +42,8 @@
.globl sp_min_entrypoint
.globl sp_min_warm_entrypoint
-func sp_min_vector_table
+
+vector_base sp_min_vector_table
b sp_min_entrypoint
b plat_panic_handler /* Undef */
b handle_smc /* Syscall */
@@ -50,185 +52,70 @@
b plat_panic_handler /* Reserved */
b plat_panic_handler /* IRQ */
b plat_panic_handler /* FIQ */
-endfunc sp_min_vector_table
-
-func handle_smc
- smcc_save_gp_mode_regs
-
- /* r0 points to smc_context */
- mov r2, r0 /* handle */
- ldcopr r0, SCR
-
- /* Save SCR in stack */
- push {r0}
- and r3, r0, #SCR_NS_BIT /* flags */
-
- /* Switch to Secure Mode*/
- bic r0, #SCR_NS_BIT
- stcopr r0, SCR
- isb
- ldr r0, [r2, #SMC_CTX_GPREG_R0] /* smc_fid */
- /* Check whether an SMC64 is issued */
- tst r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
- beq 1f /* SMC32 is detected */
- mov r0, #SMC_UNK
- str r0, [r2, #SMC_CTX_GPREG_R0]
- mov r0, r2
- b 2f /* Skip handling the SMC */
-1:
- mov r1, #0 /* cookie */
- bl handle_runtime_svc
-2:
- /* r0 points to smc context */
-
- /* Restore SCR from stack */
- pop {r1}
- stcopr r1, SCR
- isb
- b sp_min_exit
-endfunc handle_smc
/*
* The Cold boot/Reset entrypoint for SP_MIN
*/
func sp_min_entrypoint
-
- /*
- * The caches and TLBs are disabled at reset. If any implementation
- * allows the caches/TLB to be hit while they are disabled, ensure
- * that they are invalidated here
+#if !RESET_TO_SP_MIN
+ /* ---------------------------------------------------------------
+	 * The preceding bootloader has populated r0 with a pointer to a
+	 * 'bl_params_t' structure and r1 with a pointer to a platform
+	 * specific structure.
+ * ---------------------------------------------------------------
*/
+ mov r11, r0
+ mov r12, r1
- /* Make sure we are in Secure Mode*/
- ldcopr r0, SCR
- bic r0, #SCR_NS_BIT
- stcopr r0, SCR
- isb
-
- /* Switch to monitor mode */
- cps #MODE32_mon
- isb
-
- /*
- * Set sane values for NS SCTLR as well.
- * Switch to non secure mode for this.
+ /* ---------------------------------------------------------------------
+ * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
+ * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
+ * and primary/secondary CPU logic should not be executed in this case.
+ *
+ * Also, assume that the previous bootloader has already set up the CPU
+ * endianness and has initialised the memory.
+ * ---------------------------------------------------------------------
*/
- ldr r0, =(SCTLR_RES1)
- ldcopr r1, SCR
- orr r2, r1, #SCR_NS_BIT
- stcopr r2, SCR
- isb
+ el3_entrypoint_common \
+ _set_endian=0 \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=1 \
+ _exception_vectors=sp_min_vector_table
- ldcopr r2, SCTLR
- orr r0, r0, r2
- stcopr r0, SCTLR
- isb
-
- stcopr r1, SCR
- isb
-
- /*
- * Set the CPU endianness before doing anything that might involve
- * memory reads or writes.
+ /* ---------------------------------------------------------------------
+ * Relay the previous bootloader's arguments to the platform layer
+ * ---------------------------------------------------------------------
*/
- ldcopr r0, SCTLR
- bic r0, r0, #SCTLR_EE_BIT
- stcopr r0, SCTLR
- isb
-
- /* Run the CPU Specific Reset handler */
- bl reset_handler
-
- /*
- * Enable the instruction cache and data access
- * alignment checks
+ mov r0, r11
+ mov r1, r12
+#else
+ /* ---------------------------------------------------------------------
+ * For RESET_TO_SP_MIN systems which have a programmable reset address,
+ * sp_min_entrypoint() is executed only on the cold boot path so we can
+ * skip the warm boot mailbox mechanism.
+ * ---------------------------------------------------------------------
*/
- ldcopr r0, SCTLR
- ldr r1, =(SCTLR_RES1 | SCTLR_A_BIT | SCTLR_I_BIT)
- orr r0, r0, r1
- stcopr r0, SCTLR
- isb
-
- /* Set the vector tables */
- ldr r0, =sp_min_vector_table
- stcopr r0, VBAR
- stcopr r0, MVBAR
- isb
+ el3_entrypoint_common \
+ _set_endian=1 \
+ _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
+ _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
+ _init_memory=1 \
+ _init_c_runtime=1 \
+ _exception_vectors=sp_min_vector_table
- /*
- * Enable the SIF bit to disable instruction fetches
- * from Non-secure memory.
+ /* ---------------------------------------------------------------------
+ * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
+ * to run so there's no argument to relay from a previous bootloader.
+ * Zero the arguments passed to the platform layer to reflect that.
+ * ---------------------------------------------------------------------
*/
- ldcopr r0, SCR
- orr r0, r0, #SCR_SIF_BIT
- stcopr r0, SCR
+ mov r0, #0
+ mov r1, #0
+#endif /* RESET_TO_SP_MIN */
- /*
- * Enable the SError interrupt now that the exception vectors have been
- * setup.
- */
- cpsie a
- isb
-
- /* Enable access to Advanced SIMD registers */
- ldcopr r0, NSACR
- bic r0, r0, #NSASEDIS_BIT
- orr r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
- stcopr r0, NSACR
- isb
-
- /*
- * Enable access to Advanced SIMD, Floating point and to the Trace
- * functionality as well.
- */
- ldcopr r0, CPACR
- bic r0, r0, #ASEDIS_BIT
- bic r0, r0, #TRCDIS_BIT
- orr r0, r0, #CPACR_ENABLE_FP_ACCESS
- stcopr r0, CPACR
- isb
-
- vmrs r0, FPEXC
- orr r0, r0, #FPEXC_EN_BIT
- vmsr FPEXC, r0
-
- /* Detect whether Warm or Cold boot */
- bl plat_get_my_entrypoint
- cmp r0, #0
- /* If warm boot detected, jump to warm boot entry */
- bxne r0
-
- /* Setup C runtime stack */
- bl plat_set_my_stack
-
- /* Perform platform specific memory initialization */
- bl platform_mem_init
-
- /* Initialize the C Runtime Environment */
-
- /*
- * Invalidate the RW memory used by SP_MIN image. This includes
- * the data and NOBITS sections. This is done to safeguard against
- * possible corruption of this memory by dirty cache lines in a system
- * cache as a result of use by an earlier boot loader stage.
- */
- ldr r0, =__RW_START__
- ldr r1, =__RW_END__
- sub r1, r1, r0
- bl inv_dcache_range
-
- ldr r0, =__BSS_START__
- ldr r1, =__BSS_SIZE__
- bl zeromem
-
-#if USE_COHERENT_MEM
- ldr r0, =__COHERENT_RAM_START__
- ldr r1, =__COHERENT_RAM_UNALIGNED_SIZE__
- bl zeromem
-#endif
-
- /* Perform platform specific early arch. setup */
bl sp_min_early_platform_setup
bl sp_min_plat_arch_setup
@@ -270,13 +157,76 @@
b sp_min_exit
endfunc sp_min_entrypoint
+
+/*
+ * SMC handling function for SP_MIN.
+ */
+func handle_smc
+ smcc_save_gp_mode_regs
+
+ /* r0 points to smc_context */
+ mov r2, r0 /* handle */
+ ldcopr r0, SCR
+
+ /* Save SCR in stack */
+ push {r0}
+ and r3, r0, #SCR_NS_BIT /* flags */
+
+	/* Switch to Secure Mode */
+ bic r0, #SCR_NS_BIT
+ stcopr r0, SCR
+ isb
+ ldr r0, [r2, #SMC_CTX_GPREG_R0] /* smc_fid */
+ /* Check whether an SMC64 is issued */
+ tst r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
+ beq 1f /* SMC32 is detected */
+ mov r0, #SMC_UNK
+ str r0, [r2, #SMC_CTX_GPREG_R0]
+ mov r0, r2
+ b 2f /* Skip handling the SMC */
+1:
+ mov r1, #0 /* cookie */
+ bl handle_runtime_svc
+2:
+ /* r0 points to smc context */
+
+ /* Restore SCR from stack */
+ pop {r1}
+ stcopr r1, SCR
+ isb
+
+ b sp_min_exit
+endfunc handle_smc
+
+
/*
* The Warm boot entrypoint for SP_MIN.
*/
func sp_min_warm_entrypoint
-
- /* Setup C runtime stack */
- bl plat_set_my_stack
+ /*
+ * On the warm boot path, most of the EL3 initialisations performed by
+ * 'el3_entrypoint_common' must be skipped:
+ *
+ * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
+ * programming the reset address do we need to set the CPU endianness.
+	 *   In other cases, we assume this has been taken care of by the
+	 *   entrypoint code.
+ *
+ * - No need to determine the type of boot, we know it is a warm boot.
+ *
+ * - Do not try to distinguish between primary and secondary CPUs, this
+ * notion only exists for a cold boot.
+ *
+ * - No need to initialise the memory or the C runtime environment,
+ * it has been done once and for all on the cold boot path.
+ */
+ el3_entrypoint_common \
+ _set_endian=PROGRAMMABLE_RESET_ADDRESS \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=0 \
+ _exception_vectors=sp_min_vector_table
/* --------------------------------------------
* Enable the MMU with the DCache disabled. It
diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S
index b158db1..e0e23e8 100644
--- a/bl32/sp_min/sp_min.ld.S
+++ b/bl32/sp_min/sp_min.ld.S
@@ -50,6 +50,7 @@
__TEXT_START__ = .;
*entrypoint.o(.text*)
*(.text*)
+ *(.vectors)
. = NEXT(4096);
__TEXT_END__ = .;
} >RAM
@@ -98,6 +99,7 @@
KEEP(*(cpu_ops))
__CPU_OPS_END__ = .;
+ *(.vectors)
__RO_END_UNALIGNED__ = .;
/*
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
index a8b572e..ac7f03e 100644
--- a/bl32/sp_min/sp_min.mk
+++ b/bl32/sp_min/sp_min.mk
@@ -58,6 +58,6 @@
include ${SP_MIN_PLAT_MAKEFILE}
endif
-RESET_TO_SP_MIN := 1
+RESET_TO_SP_MIN := 0
$(eval $(call add_define,RESET_TO_SP_MIN))
$(eval $(call assert_boolean,RESET_TO_SP_MIN))
diff --git a/bl32/sp_min/sp_min_main.c b/bl32/sp_min/sp_min_main.c
index 31cab3d..02663a2 100644
--- a/bl32/sp_min/sp_min_main.c
+++ b/bl32/sp_min/sp_min_main.c
@@ -151,24 +151,33 @@
}
/******************************************************************************
+ * Implement the ARM Standard Service function to get arguments for a
+ * particular service.
+ *****************************************************************************/
+uintptr_t get_arm_std_svc_args(unsigned int svc_mask)
+{
+ /* Setup the arguments for PSCI Library */
+ DEFINE_STATIC_PSCI_LIB_ARGS_V1(psci_args, sp_min_warm_entrypoint);
+
+ /* PSCI is the only ARM Standard Service implemented */
+ assert(svc_mask == PSCI_FID_MASK);
+
+ return (uintptr_t)&psci_args;
+}
+
+/******************************************************************************
* The SP_MIN main function. Do the platform and PSCI Library setup. Also
* initialize the runtime service framework.
*****************************************************************************/
void sp_min_main(void)
{
- /* Perform platform setup in TSP MIN */
- sp_min_platform_setup();
+ NOTICE("SP_MIN: %s\n", version_string);
+ NOTICE("SP_MIN: %s\n", build_message);
- /*
- * Initialize the PSCI library and perform the remaining generic
- * architectural setup from PSCI.
- */
- psci_setup((uintptr_t)sp_min_warm_entrypoint);
+ /* Perform the SP_MIN platform setup */
+ sp_min_platform_setup();
- /*
- * Initialize the runtime services e.g. psci
- * This is where the monitor mode will be initialized
- */
+ /* Initialize the runtime services e.g. psci */
INFO("SP_MIN: Initializing runtime services\n");
runtime_svc_init();
diff --git a/common/aarch32/debug.S b/common/aarch32/debug.S
index 01ec1e3..6be6951 100644
--- a/common/aarch32/debug.S
+++ b/common/aarch32/debug.S
@@ -32,6 +32,7 @@
#include <asm_macros.S>
.globl do_panic
+ .globl report_exception
/***********************************************************
* The common implementation of do_panic for all BL stages
@@ -40,3 +41,14 @@
b plat_panic_handler
endfunc do_panic
+ /***********************************************************
+ * This function is called from the vector table for
+	 * unhandled exceptions. It reads the current mode and
+	 * passes it to the platform.
+ ***********************************************************/
+func report_exception
+ mrs r0, cpsr
+ and r0, #MODE32_MASK
+ bl plat_report_exception
+ bl plat_panic_handler
+endfunc report_exception
diff --git a/common/bl_common.c b/common/bl_common.c
index bae02d4..193e972 100644
--- a/common/bl_common.c
+++ b/common/bl_common.c
@@ -53,10 +53,7 @@
return value;
}
-static inline unsigned int is_page_aligned (uintptr_t addr) {
- return (addr & (PAGE_SIZE - 1)) == 0;
-}
-
+#if !LOAD_IMAGE_V2
/******************************************************************************
* Determine whether the memory region delimited by 'addr' and 'size' is free,
* given the extents of free memory.
@@ -179,6 +176,7 @@
INFO(" free region = [base = %p, size = 0x%zx]\n",
(void *) mem_layout->free_base, mem_layout->free_size);
}
+#endif /* LOAD_IMAGE_V2 */
/* Generic function to return the size of an image */
size_t image_size(unsigned int image_id)
@@ -223,6 +221,156 @@
return image_size;
}
+#if LOAD_IMAGE_V2
+
+/*******************************************************************************
+ * Generic function to load an image at a specific address given
+ * an image ID and the image information (base address and maximum size).
+ *
+ * If the load is successful then the image information is updated.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ ******************************************************************************/
+int load_image(unsigned int image_id, image_info_t *image_data)
+{
+ uintptr_t dev_handle;
+ uintptr_t image_handle;
+ uintptr_t image_spec;
+ uintptr_t image_base;
+ size_t image_size;
+ size_t bytes_read;
+ int io_result;
+
+ assert(image_data != NULL);
+ assert(image_data->h.version >= VERSION_2);
+
+ image_base = image_data->image_base;
+
+ /* Obtain a reference to the image by querying the platform layer */
+ io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+ if (io_result != 0) {
+ WARN("Failed to obtain reference to image id=%u (%i)\n",
+ image_id, io_result);
+ return io_result;
+ }
+
+ /* Attempt to access the image */
+ io_result = io_open(dev_handle, image_spec, &image_handle);
+ if (io_result != 0) {
+ WARN("Failed to access image id=%u (%i)\n",
+ image_id, io_result);
+ return io_result;
+ }
+
+ INFO("Loading image id=%u at address %p\n", image_id,
+ (void *) image_base);
+
+ /* Find the size of the image */
+ io_result = io_size(image_handle, &image_size);
+ if ((io_result != 0) || (image_size == 0)) {
+ WARN("Failed to determine the size of the image id=%u (%i)\n",
+ image_id, io_result);
+ goto exit;
+ }
+
+ /* Check that the image size to load is within limit */
+ if (image_size > image_data->image_max_size) {
+ WARN("Image id=%u size out of bounds\n", image_id);
+ io_result = -EFBIG;
+ goto exit;
+ }
+
+ image_data->image_size = image_size;
+
+ /* We have enough space so load the image now */
+ /* TODO: Consider whether to try to recover/retry a partially successful read */
+ io_result = io_read(image_handle, image_base, image_size, &bytes_read);
+ if ((io_result != 0) || (bytes_read < image_size)) {
+ WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
+ goto exit;
+ }
+
+#if !TRUSTED_BOARD_BOOT
+ /*
+ * File has been successfully loaded.
+ * Flush the image to main memory so that it can be executed later by
+ * any CPU, regardless of cache and MMU state.
+ * When TBB is enabled the image is flushed later, after image
+ * authentication.
+ */
+ flush_dcache_range(image_base, image_size);
+#endif /* TRUSTED_BOARD_BOOT */
+
+ INFO("Image id=%u loaded: %p - %p\n", image_id, (void *) image_base,
+ (void *) (image_base + image_size));
+
+exit:
+ io_close(image_handle);
+ /* Ignore improbable/unrecoverable error in 'close' */
+
+ /* TODO: Consider maintaining open device connection from this bootloader stage */
+ io_dev_close(dev_handle);
+ /* Ignore improbable/unrecoverable error in 'dev_close' */
+
+ return io_result;
+}
+
+/*******************************************************************************
+ * Generic function to load and authenticate an image. The image is actually
+ * loaded by calling the 'load_image()' function. Therefore, it returns the
+ * same error codes if the loading operation failed, or -EAUTH if the
+ * authentication failed. In addition, this function uses recursion to
+ * authenticate the parent images up to the root of trust.
+ ******************************************************************************/
+int load_auth_image(unsigned int image_id, image_info_t *image_data)
+{
+ int rc;
+
+#if TRUSTED_BOARD_BOOT
+ unsigned int parent_id;
+
+ /* Use recursion to authenticate parent images */
+ rc = auth_mod_get_parent_id(image_id, &parent_id);
+ if (rc == 0) {
+ rc = load_auth_image(parent_id, image_data);
+ if (rc != 0) {
+ return rc;
+ }
+ }
+#endif /* TRUSTED_BOARD_BOOT */
+
+ /* Load the image */
+ rc = load_image(image_id, image_data);
+ if (rc != 0) {
+ return rc;
+ }
+
+#if TRUSTED_BOARD_BOOT
+ /* Authenticate it */
+ rc = auth_mod_verify_img(image_id,
+ (void *)image_data->image_base,
+ image_data->image_size);
+ if (rc != 0) {
+ memset((void *)image_data->image_base, 0x00,
+ image_data->image_size);
+ flush_dcache_range(image_data->image_base,
+ image_data->image_size);
+ return -EAUTH;
+ }
+
+ /*
+ * File has been successfully loaded and authenticated.
+ * Flush the image to main memory so that it can be executed later by
+ * any CPU, regardless of cache and MMU state.
+ */
+ flush_dcache_range(image_data->image_base, image_data->image_size);
+#endif /* TRUSTED_BOARD_BOOT */
+
+ return 0;
+}
+
+#else /* LOAD_IMAGE_V2 */
+
/*******************************************************************************
* Generic function to load an image at a specific address given an image ID and
* extents of free memory.
@@ -255,7 +403,7 @@
assert(mem_layout != NULL);
assert(image_data != NULL);
- assert(image_data->h.version >= VERSION_1);
+ assert(image_data->h.version == VERSION_1);
/* Obtain a reference to the image by querying the platform layer */
io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
@@ -348,8 +496,10 @@
/*******************************************************************************
* Generic function to load and authenticate an image. The image is actually
- * loaded by calling the 'load_image()' function. In addition, this function
- * uses recursion to authenticate the parent images up to the root of trust.
+ * loaded by calling the 'load_image()' function. Therefore, it returns the
+ * same error codes if the loading operation failed, or -EAUTH if the
+ * authentication failed. In addition, this function uses recursion to
+ * authenticate the parent images up to the root of trust.
******************************************************************************/
int load_auth_image(meminfo_t *mem_layout,
unsigned int image_id,
@@ -403,6 +553,8 @@
return 0;
}
+#endif /* LOAD_IMAGE_V2 */
+
/*******************************************************************************
* Print the content of an entry_point_info_t structure.
******************************************************************************/
diff --git a/common/desc_image_load.c b/common/desc_image_load.c
new file mode 100644
index 0000000..a9762b7
--- /dev/null
+++ b/common/desc_image_load.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <desc_image_load.h>
+
+
+extern bl_mem_params_node_t *bl_mem_params_desc_ptr;
+extern unsigned int bl_mem_params_desc_num;
+
+static bl_load_info_t bl_load_info;
+static bl_params_t next_bl_params;
+
+
+/*******************************************************************************
+ * This function flushes the data structures so that they are visible
+ * in memory for the next BL image.
+ ******************************************************************************/
+void flush_bl_params_desc(void)
+{
+ flush_dcache_range((unsigned long)bl_mem_params_desc_ptr,
+ sizeof(*bl_mem_params_desc_ptr) * bl_mem_params_desc_num);
+}
+
+/*******************************************************************************
+ * This function returns the index of the given image_id within the
+ * image descriptor array provided by bl_mem_params_desc_ptr if the
+ * image is found, else it returns -1.
+ ******************************************************************************/
+int get_bl_params_node_index(unsigned int image_id)
+{
+ int index;
+ assert(image_id != INVALID_IMAGE_ID);
+
+ for (index = 0; index < bl_mem_params_desc_num; index++) {
+ if (bl_mem_params_desc_ptr[index].image_id == image_id)
+ return index;
+ }
+
+ return -1;
+}
+
+/*******************************************************************************
+ * This function returns the pointer to the `bl_mem_params_node_t` object
+ * for the given image_id within the image descriptor array provided by
+ * bl_mem_params_desc_ptr if the image is found, else it returns NULL.
+ ******************************************************************************/
+bl_mem_params_node_t *get_bl_mem_params_node(unsigned int image_id)
+{
+ int index;
+ assert(image_id != INVALID_IMAGE_ID);
+
+ index = get_bl_params_node_index(image_id);
+ if (index >= 0)
+ return &bl_mem_params_desc_ptr[index];
+ else
+ return NULL;
+}
+
+/*******************************************************************************
+ * This function creates the list of loadable images, by populating and
+ * linking each `bl_load_info_node_t` type node, using the internal array
+ * of image descriptors provided by bl_mem_params_desc_ptr. It also populates
+ * and returns a `bl_load_info_t` structure that contains the head of the
+ * list of loadable images.
+ ******************************************************************************/
+bl_load_info_t *get_bl_load_info_from_mem_params_desc(void)
+{
+ int index = 0;
+
+ /* If there is no image to start with, return NULL */
+ if (!bl_mem_params_desc_num)
+ return NULL;
+
+ /* Assign initial data structures */
+ bl_load_info_node_t *bl_node_info =
+ &bl_mem_params_desc_ptr[index].load_node_mem;
+ bl_load_info.head = bl_node_info;
+ SET_PARAM_HEAD(&bl_load_info, PARAM_BL_LOAD_INFO, VERSION_2, 0);
+
+ /* Go through the image descriptor array and create the list */
+ for (; index < bl_mem_params_desc_num; index++) {
+
+ /* Populate the image information */
+ bl_node_info->image_id = bl_mem_params_desc_ptr[index].image_id;
+ bl_node_info->image_info = &bl_mem_params_desc_ptr[index].image_info;
+
+ /* Link next image if present */
+ if ((index + 1) < bl_mem_params_desc_num) {
+ /* Get the memory and link the next node */
+ bl_node_info->next_load_info =
+ &bl_mem_params_desc_ptr[index + 1].load_node_mem;
+ bl_node_info = bl_node_info->next_load_info;
+ }
+ }
+
+ return &bl_load_info;
+}
+
+/*******************************************************************************
+ * This function creates the list of executable images, by populating and
+ * linking each `bl_params_node_t` type node, using the internal array of
+ * image descriptors provided by bl_mem_params_desc_ptr. It also populates
+ * and returns a `bl_params_t` structure that contains the head of the
+ * list of executable images.
+ ******************************************************************************/
+bl_params_t *get_next_bl_params_from_mem_params_desc(void)
+{
+ int count;
+ unsigned int img_id = 0;
+ int link_index = 0;
+ bl_params_node_t *bl_current_exec_node = NULL;
+ bl_params_node_t *bl_last_exec_node = NULL;
+ bl_mem_params_node_t *desc_ptr;
+
+ /* If there is no image to start with, return NULL */
+ if (!bl_mem_params_desc_num)
+ return NULL;
+
+ /* Get the list HEAD */
+ for (count = 0; count < bl_mem_params_desc_num; count++) {
+
+ desc_ptr = &bl_mem_params_desc_ptr[count];
+
+ if ((EP_GET_EXE(desc_ptr->ep_info.h.attr) == EXECUTABLE) &&
+ (EP_GET_FIRST_EXE(desc_ptr->ep_info.h.attr) == EP_FIRST_EXE)) {
+ next_bl_params.head = &desc_ptr->params_node_mem;
+ link_index = count;
+ break;
+ }
+ }
+
+ /* Make sure we have a HEAD node */
+ assert(next_bl_params.head != NULL);
+
+ /* Populate the HEAD information */
+ SET_PARAM_HEAD(&next_bl_params, PARAM_BL_PARAMS, VERSION_2, 0);
+
+ /*
+ * Go through the image descriptor array and create the list.
+ * This bounded loop is to make sure that we are not looping forever.
+ */
+ for (count = 0 ; count < bl_mem_params_desc_num; count++) {
+
+ desc_ptr = &bl_mem_params_desc_ptr[link_index];
+
+ /* Make sure the image is executable */
+ assert(EP_GET_EXE(desc_ptr->ep_info.h.attr) == EXECUTABLE);
+
+ /* Get the memory for current node */
+ bl_current_exec_node = &desc_ptr->params_node_mem;
+
+ /* Populate the image information */
+ bl_current_exec_node->image_id = desc_ptr->image_id;
+ bl_current_exec_node->image_info = &desc_ptr->image_info;
+ bl_current_exec_node->ep_info = &desc_ptr->ep_info;
+
+ if (bl_last_exec_node) {
+ /* Assert if loop detected */
+ assert(bl_last_exec_node->next_params_info == NULL);
+
+ /* Link the previous node to the current one */
+ bl_last_exec_node->next_params_info = bl_current_exec_node;
+ }
+
+ /* Update the last node */
+ bl_last_exec_node = bl_current_exec_node;
+
+ /* If no next hand-off image then break out */
+ img_id = desc_ptr->next_handoff_image_id;
+ if (img_id == INVALID_IMAGE_ID)
+ break;
+
+ /* Get the index for the next hand-off image */
+ link_index = get_bl_params_node_index(img_id);
+ assert((link_index > 0) &&
+ (link_index < bl_mem_params_desc_num));
+ }
+
+ /* Invalid image is expected to terminate the loop */
+ assert(img_id == INVALID_IMAGE_ID);
+
+ /* Populate arg0 for the next BL image */
+ next_bl_params.head->ep_info->args.arg0 = (unsigned long)&next_bl_params;
+
+ /* Flush the parameters to be passed to the next BL image */
+ flush_dcache_range((unsigned long)&next_bl_params,
+ sizeof(next_bl_params));
+
+ return &next_bl_params;
+}
diff --git a/docs/porting-guide.md b/docs/porting-guide.md
index 195c937..93c0169 100644
--- a/docs/porting-guide.md
+++ b/docs/porting-guide.md
@@ -721,7 +721,6 @@
linear CPU index, please refer [Power Domain Topology Design].
-
2.4 Common optional modifications
---------------------------------
@@ -777,11 +776,15 @@
The default implementation doesn't do anything, to avoid making assumptions
about the way the platform displays its status information.
-This function receives the exception type as its argument. Possible values for
-exceptions types are listed in the [include/common/bl_common.h] header file.
-Note that these constants are not related to any architectural exception code;
-they are just an ARM Trusted Firmware convention.
+For AArch64, this function receives the exception type as its argument.
+Possible values for exception types are listed in the
+[include/common/bl_common.h] header file. Note that these constants are not
+related to any architectural exception code; they are just an ARM Trusted
+Firmware convention.
+For AArch32, this function receives the exception mode as its argument.
+Possible values for exception modes are listed in the
+[include/lib/aarch32/arch.h] header file.
### Function : plat_reset_handler()
@@ -841,9 +844,36 @@
environment is initialized.
Note: The address from where it was called is stored in x30 (Link Register).
-
The default implementation simply spins.
+
+### Function : plat_get_bl_image_load_info()
+
+ Argument : void
+ Return : bl_load_info_t *
+
+This function returns a pointer to the list of images that the platform has
+populated for loading. It is currently invoked in BL2 to load the
+BL3xx images, when LOAD_IMAGE_V2 is enabled.
+
+### Function : plat_get_next_bl_params()
+
+ Argument : void
+ Return : bl_params_t *
+
+This function returns a pointer to the shared memory that the platform has
+kept aside to pass trusted firmware related information that the next BL
+image needs. It is currently invoked in BL2 to pass this information to
+the next BL image, when LOAD_IMAGE_V2 is enabled.
+
+### Function : plat_flush_next_bl_params()
+
+ Argument : void
+ Return : void
+
+This function flushes to main memory all the image parameters that are passed
+to the next image. It is currently invoked in BL2 to flush this information
+to the next BL image, when LOAD_IMAGE_V2 is enabled.
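+
+When LOAD_IMAGE_V2 is enabled, a platform port may implement the three
+functions above as thin wrappers around the generic helpers added in
+`common/desc_image_load.c`. The sketch below is only one possible
+implementation and assumes the platform has registered its image descriptors
+with `REGISTER_BL_IMAGE_DESCS`:
+
+    #include <desc_image_load.h>
+
+    bl_load_info_t *plat_get_bl_image_load_info(void)
+    {
+    	/* List of images to load, built from the registered descriptors */
+    	return get_bl_load_info_from_mem_params_desc();
+    }
+
+    bl_params_t *plat_get_next_bl_params(void)
+    {
+    	/* Hand-off information for the next BL image */
+    	return get_next_bl_params_from_mem_params_desc();
+    }
+
+    void plat_flush_next_bl_params(void)
+    {
+    	flush_bl_params_desc();
+    }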
3. Modifications specific to a Boot Loader stage
-------------------------------------------------
@@ -1174,6 +1204,20 @@
populated with the extents of secure RAM available for BL2 to use. See
`bl2_early_platform_setup()` above.
+
+The following function is required only when LOAD_IMAGE_V2 is enabled.
+
+### Function : bl2_plat_handle_post_image_load() [mandatory]
+
+ Argument : unsigned int
+ Return : int
+
+This function can be used by the platforms to update/use image information
+for the given `image_id`. It is currently invoked in BL2 to handle
+BL image specific information based on the `image_id` passed, when
+LOAD_IMAGE_V2 is enabled.
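+
+A minimal sketch of this hook, assuming the platform uses the image descriptor
+helpers from `common/desc_image_load.c` (the error value returned on lookup
+failure is an illustrative choice):
+
+    int bl2_plat_handle_post_image_load(unsigned int image_id)
+    {
+    	/* Look up the descriptor registered for this image */
+    	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+
+    	if (bl_mem_params == NULL)
+    		return -1;
+
+    	/* Update bl_mem_params->ep_info for this image_id, if required */
+    	return 0;
+    }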
+
+The following functions are required only when LOAD_IMAGE_V2 is disabled.
### Function : bl2_plat_get_scp_bl2_meminfo() [mandatory]
@@ -2194,6 +2238,7 @@
[plat/common/aarch64/platform_up_stack.S]: ../plat/common/aarch64/platform_up_stack.S
[plat/arm/board/fvp/fvp_pm.c]: ../plat/arm/board/fvp/fvp_pm.c
[include/common/bl_common.h]: ../include/common/bl_common.h
+[include/lib/aarch32/arch.h]: ../include/lib/aarch32/arch.h
[include/plat/arm/common/arm_def.h]: ../include/plat/arm/common/arm_def.h
[include/plat/common/common_def.h]: ../include/plat/common/common_def.h
[include/plat/common/platform.h]: ../include/plat/common/platform.h
diff --git a/docs/user-guide.md b/docs/user-guide.md
index d545262..d7d743a 100644
--- a/docs/user-guide.md
+++ b/docs/user-guide.md
@@ -430,6 +430,12 @@
pages" section in [Firmware Design]. This flag is disabled by default and
affects all BL images.
+* `LOAD_IMAGE_V2`: Boolean option to enable support for new version (v2) of
+ image loading, which provides more flexibility and scalability around what
+ images are loaded and executed during boot. Default is 0.
+ Note: `TRUSTED_BOARD_BOOT` is currently not supported when `LOAD_IMAGE_V2`
+ is enabled.
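+
+  For example, a sketch of a build invocation with this option enabled
+  (platform name is a placeholder):
+
+      make PLAT=<platform> LOAD_IMAGE_V2=1 all fip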
+
#### ARM development platform specific build options
* `ARM_TSP_RAM_LOCATION`: location of the TSP binary. Options:
diff --git a/include/bl32/sp_min/platform_sp_min.h b/include/bl32/sp_min/platform_sp_min.h
index ae9dd58..c8c3fc5 100644
--- a/include/bl32/sp_min/platform_sp_min.h
+++ b/include/bl32/sp_min/platform_sp_min.h
@@ -34,7 +34,8 @@
/*******************************************************************************
* Mandatory SP_MIN functions
******************************************************************************/
-void sp_min_early_platform_setup(void);
+void sp_min_early_platform_setup(void *from_bl2,
+ void *plat_params_from_bl2);
void sp_min_plat_arch_setup(void);
void sp_min_platform_setup(void);
entry_point_info_t *sp_min_plat_get_bl33_ep_info(void);
diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S
index 11e45bb..5f04499 100644
--- a/include/common/aarch32/asm_macros.S
+++ b/include/common/aarch32/asm_macros.S
@@ -70,6 +70,16 @@
.endm
/*
+	 * Declare the exception vector table, enforcing that it is aligned
+	 * on a 32 byte boundary.
+ */
+ .macro vector_base label
+ .section .vectors, "ax"
+ .align 5
+ \label:
+ .endm
+
+ /*
* This macro calculates the base address of the current CPU's multi
* processor(MP) stack using the plat_my_core_pos() index, the name of
* the stack storage and the size of each stack.
diff --git a/include/common/aarch32/el3_common_macros.S b/include/common/aarch32/el3_common_macros.S
new file mode 100644
index 0000000..a572ef9
--- /dev/null
+++ b/include/common/aarch32/el3_common_macros.S
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __EL3_COMMON_MACROS_S__
+#define __EL3_COMMON_MACROS_S__
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+ /*
+ * Helper macro to initialise EL3 registers we care about.
+ */
+ .macro el3_arch_init_common _exception_vectors
+ /* ---------------------------------------------------------------------
+ * Enable the instruction cache and alignment checks
+ * ---------------------------------------------------------------------
+ */
+ ldr r1, =(SCTLR_RES1 | SCTLR_I_BIT | SCTLR_A_BIT)
+ ldcopr r0, SCTLR
+ orr r0, r0, r1
+ stcopr r0, SCTLR
+ isb
+
+ /* ---------------------------------------------------------------------
+ * Set the exception vectors (VBAR/MVBAR).
+ * ---------------------------------------------------------------------
+ */
+ ldr r0, =\_exception_vectors
+ stcopr r0, VBAR
+ stcopr r0, MVBAR
+ isb
+
+ /* -----------------------------------------------------
+ * Enable the SIF bit to disable instruction fetches
+ * from Non-secure memory.
+ * -----------------------------------------------------
+ */
+ ldcopr r0, SCR
+ orr r0, r0, #SCR_SIF_BIT
+ stcopr r0, SCR
+
+ /* -----------------------------------------------------
+ * Enable the Asynchronous data abort now that the
+ * exception vectors have been setup.
+ * -----------------------------------------------------
+ */
+ cpsie a
+ isb
+
+ /* Enable access to Advanced SIMD registers */
+ ldcopr r0, NSACR
+ bic r0, r0, #NSASEDIS_BIT
+ bic r0, r0, #NSTRCDIS_BIT
+ orr r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
+ stcopr r0, NSACR
+ isb
+
+ /*
+ * Enable access to Advanced SIMD, Floating point and to the Trace
+ * functionality as well.
+ */
+ ldcopr r0, CPACR
+ bic r0, r0, #ASEDIS_BIT
+ bic r0, r0, #TRCDIS_BIT
+ orr r0, r0, #CPACR_ENABLE_FP_ACCESS
+ stcopr r0, CPACR
+ isb
+
+ vmrs r0, FPEXC
+ orr r0, r0, #FPEXC_EN_BIT
+ vmsr FPEXC, r0
+ isb
+ .endm
+
+/* -----------------------------------------------------------------------------
+ * This is the super set of actions that need to be performed during a cold boot
+ * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
+ *
+ * This macro will always perform reset handling, architectural initialisations
+ * and stack setup. The rest of the actions are optional because they might not
+ * be needed, depending on the context in which this macro is called. This is
+ * why this macro is parameterised; each parameter allows individual actions
+ * to be enabled or disabled.
+ *
+ * _set_endian:
+ * Whether the macro needs to configure the endianness of data accesses.
+ *
+ * _warm_boot_mailbox:
+ * Whether the macro needs to detect the type of boot (cold/warm). The
+ *	detection is based on the platform entrypoint address: if it is zero
+ *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
+ *	this macro jumps to the platform entrypoint address.
+ *
+ * _secondary_cold_boot:
+ * Whether the macro needs to identify the CPU that is calling it: primary
+ * CPU or secondary CPU. The primary CPU will be allowed to carry on with
+ * the platform initialisations, while the secondaries will be put in a
+ * platform-specific state in the meantime.
+ *
+ * If the caller knows this macro will only be called by the primary CPU
+ * then this parameter can be defined to 0 to skip this step.
+ *
+ * _init_memory:
+ * Whether the macro needs to initialise the memory.
+ *
+ * _init_c_runtime:
+ * Whether the macro needs to initialise the C runtime environment.
+ *
+ * _exception_vectors:
+ *	Address of the exception vectors to program in the VBAR and MVBAR
+ *	registers.
+ * -----------------------------------------------------------------------------
+ */
+ .macro el3_entrypoint_common \
+ _set_endian, _warm_boot_mailbox, _secondary_cold_boot, \
+ _init_memory, _init_c_runtime, _exception_vectors
+
+ /* Make sure we are in Secure Mode */
+#if ASM_ASSERTION
+ ldcopr r0, SCR
+ tst r0, #SCR_NS_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ .if \_set_endian
+ /* -------------------------------------------------------------
+ * Set the CPU endianness before doing anything that might
+ * involve memory reads or writes.
+ * -------------------------------------------------------------
+ */
+ ldcopr r0, SCTLR
+ bic r0, r0, #SCTLR_EE_BIT
+ stcopr r0, SCTLR
+ isb
+ .endif /* _set_endian */
+
+ /* Switch to monitor mode */
+ cps #MODE32_mon
+ isb
+
+ .if \_warm_boot_mailbox
+ /* -------------------------------------------------------------
+ * This code will be executed for both warm and cold resets.
+ * Now is the time to distinguish between the two.
+	 * Query the platform entrypoint address; if it is not zero
+	 * then it is a warm boot, so jump to that address.
+ * -------------------------------------------------------------
+ */
+ bl plat_get_my_entrypoint
+ cmp r0, #0
+ bxne r0
+ .endif /* _warm_boot_mailbox */
+
+ /* ---------------------------------------------------------------------
+ * It is a cold boot.
+ * Perform any processor specific actions upon reset e.g. cache, TLB
+ * invalidations etc.
+ * ---------------------------------------------------------------------
+ */
+ bl reset_handler
+
+ el3_arch_init_common \_exception_vectors
+
+ .if \_secondary_cold_boot
+ /* -------------------------------------------------------------
+ * Check if this is a primary or secondary CPU cold boot.
+ * The primary CPU will set up the platform while the
+ * secondaries are placed in a platform-specific state until the
+ * primary CPU performs the necessary actions to bring them out
+ * of that state and allows entry into the OS.
+ * -------------------------------------------------------------
+ */
+ bl plat_is_my_cpu_primary
+ cmp r0, #0
+ bne do_primary_cold_boot
+
+ /* This is a cold boot on a secondary CPU */
+ bl plat_secondary_cold_boot_setup
+ /* plat_secondary_cold_boot_setup() is not supposed to return */
+ bl plat_panic_handler
+
+ do_primary_cold_boot:
+ .endif /* _secondary_cold_boot */
+
+ /* ---------------------------------------------------------------------
+ * Initialize memory now. Secondary CPU initialization won't get to this
+ * point.
+ * ---------------------------------------------------------------------
+ */
+
+ .if \_init_memory
+ bl platform_mem_init
+ .endif /* _init_memory */
+
+ /* ---------------------------------------------------------------------
+ * Init C runtime environment:
+ * - Zero-initialise the NOBITS sections. There are 2 of them:
+ * - the .bss section;
+ * - the coherent memory section (if any).
+ * - Relocate the data section from ROM to RAM, if required.
+ * ---------------------------------------------------------------------
+ */
+ .if \_init_c_runtime
+#if IMAGE_BL32
+ /* -----------------------------------------------------------------
+ * Invalidate the RW memory used by the BL32 (SP_MIN) image. This
+ * includes the data and NOBITS sections. This is done to
+ * safeguard against possible corruption of this memory by
+ * dirty cache lines in a system cache as a result of use by
+ * an earlier boot loader stage.
+ * -----------------------------------------------------------------
+ */
+ ldr r0, =__RW_START__
+ ldr r1, =__RW_END__
+ sub r1, r1, r0
+ bl inv_dcache_range
+#endif /* IMAGE_BL32 */
+
+ ldr r0, =__BSS_START__
+ ldr r1, =__BSS_SIZE__
+ bl zeromem
+
+#if USE_COHERENT_MEM
+ ldr r0, =__COHERENT_RAM_START__
+ ldr r1, =__COHERENT_RAM_UNALIGNED_SIZE__
+ bl zeromem
+#endif
+
+#if IMAGE_BL1
+ /* -----------------------------------------------------
+ * Copy data from ROM to RAM.
+ * -----------------------------------------------------
+ */
+ ldr r0, =__DATA_RAM_START__
+ ldr r1, =__DATA_ROM_START__
+ ldr r2, =__DATA_SIZE__
+ bl memcpy
+#endif
+ .endif /* _init_c_runtime */
+
+ /* ---------------------------------------------------------------------
+ * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
+ * the MMU is enabled. There is no risk of reading stale stack memory
+ * after enabling the MMU as only the primary CPU is running at the
+ * moment.
+ * ---------------------------------------------------------------------
+ */
+ bl plat_set_my_stack
+ .endm
+
+#endif /* __EL3_COMMON_MACROS_S__ */
diff --git a/include/common/bl_common.h b/include/common/bl_common.h
index 942843c..12d5036 100644
--- a/include/common/bl_common.h
+++ b/include/common/bl_common.h
@@ -93,11 +93,23 @@
#define EP_GET_EXE(x) (x & EP_EXE_MASK)
#define EP_SET_EXE(x, ee) ((x) = ((x) & ~EP_EXE_MASK) | (ee))
+#define EP_FIRST_EXE_MASK 0x10
+#define EP_FIRST_EXE 0x10
+#define EP_GET_FIRST_EXE(x) ((x) & EP_FIRST_EXE_MASK)
+#define EP_SET_FIRST_EXE(x, ee) ((x) = ((x) & ~EP_FIRST_EXE_MASK) | (ee))
+
#define PARAM_EP 0x01
#define PARAM_IMAGE_BINARY 0x02
#define PARAM_BL31 0x03
+#define PARAM_BL_LOAD_INFO 0x04
+#define PARAM_BL_PARAMS 0x05
+#define PARAM_PSCI_LIB_ARGS 0x06
+
+#define IMAGE_ATTRIB_SKIP_LOADING 0x02
+#define IMAGE_ATTRIB_PLAT_SETUP 0x04
#define VERSION_1 0x01
+#define VERSION_2 0x02
#define INVALID_IMAGE_ID (0xFFFFFFFF)
@@ -181,8 +193,10 @@
typedef struct meminfo {
uintptr_t total_base;
size_t total_size;
+#if !LOAD_IMAGE_V2
uintptr_t free_base;
size_t free_size;
+#endif
} meminfo_t;
typedef struct aapcs64_params {
@@ -245,6 +259,9 @@
param_header_t h;
uintptr_t image_base; /* physical address of base of image */
uint32_t image_size; /* bytes read from image file */
+#if LOAD_IMAGE_V2
+ uint32_t image_max_size;
+#endif
} image_info_t;
/*****************************************************************************
@@ -263,6 +280,39 @@
entry_point_info_t ep_info;
} image_desc_t;
+#if LOAD_IMAGE_V2
+/* BL image node in the BL image loading sequence */
+typedef struct bl_load_info_node {
+ unsigned int image_id;
+ image_info_t *image_info;
+ struct bl_load_info_node *next_load_info;
+} bl_load_info_node_t;
+
+/* BL image head node in the BL image loading sequence */
+typedef struct bl_load_info {
+ param_header_t h;
+ bl_load_info_node_t *head;
+} bl_load_info_t;
+
+/* BL image node in the BL image execution sequence */
+typedef struct bl_params_node {
+ unsigned int image_id;
+ image_info_t *image_info;
+ entry_point_info_t *ep_info;
+ struct bl_params_node *next_params_info;
+} bl_params_node_t;
+
+/*
+ * BL image head node in the BL image execution sequence.
+ * It is also used to pass information to the next BL image.
+ */
+typedef struct bl_params {
+ param_header_t h;
+ bl_params_node_t *head;
+} bl_params_t;
+
+#else /* LOAD_IMAGE_V2 */
+
/*******************************************************************************
* This structure represents the superset of information that can be passed to
* BL31 e.g. while passing control to it from BL2. The BL32 parameters will be
@@ -286,6 +336,7 @@
image_info_t *bl33_image_info;
} bl31_params_t;
+#endif /* LOAD_IMAGE_V2 */
/*
* Compile time assertions related to the 'entry_point_info' structure to
@@ -308,24 +359,34 @@
/*******************************************************************************
* Function & variable prototypes
******************************************************************************/
-uintptr_t page_align(uintptr_t, unsigned);
size_t image_size(unsigned int image_id);
+
+#if LOAD_IMAGE_V2
+
+int load_image(unsigned int image_id, image_info_t *image_data);
+int load_auth_image(unsigned int image_id, image_info_t *image_data);
+
+#else
+
+uintptr_t page_align(uintptr_t, unsigned);
int load_image(meminfo_t *mem_layout,
unsigned int image_id,
uintptr_t image_base,
image_info_t *image_data,
entry_point_info_t *entry_point_info);
int load_auth_image(meminfo_t *mem_layout,
- unsigned int image_name,
+ unsigned int image_id,
uintptr_t image_base,
image_info_t *image_data,
entry_point_info_t *entry_point_info);
-extern const char build_message[];
-extern const char version_string[];
-
void reserve_mem(uintptr_t *free_base, size_t *free_size,
uintptr_t addr, size_t size);
+#endif /* LOAD_IMAGE_V2 */
+
+extern const char build_message[];
+extern const char version_string[];
+
void print_entry_point_info(const entry_point_info_t *ep_info);
#endif /*__ASSEMBLY__*/
diff --git a/include/common/desc_image_load.h b/include/common/desc_image_load.h
new file mode 100644
index 0000000..7834262
--- /dev/null
+++ b/include/common/desc_image_load.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __DESC_IMAGE_LOAD_H__
+#define __DESC_IMAGE_LOAD_H__
+
+#include <bl_common.h>
+
+#if LOAD_IMAGE_V2
+/* The following structure is used to store BL entry point and image info. */
+typedef struct bl_mem_params_node {
+ unsigned int image_id;
+ image_info_t image_info;
+ entry_point_info_t ep_info;
+ unsigned int next_handoff_image_id;
+ bl_load_info_node_t load_node_mem;
+ bl_params_node_t params_node_mem;
+} bl_mem_params_node_t;
+
+/*
+ * Macro to register the list of BL image descriptors,
+ * defined as an array of bl_mem_params_node_t.
+ */
+#define REGISTER_BL_IMAGE_DESCS(_img_desc) \
+ bl_mem_params_node_t *bl_mem_params_desc_ptr = &_img_desc[0]; \
+ unsigned int bl_mem_params_desc_num = ARRAY_SIZE(_img_desc);
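+
+/*
+ * Illustrative (non-normative) usage; the descriptor contents are
+ * platform-specific and the image IDs below are only an example:
+ *
+ *	static bl_mem_params_node_t bl2_mem_params_descs[] = {
+ *		{ .image_id = BL32_IMAGE_ID, ... },
+ *		{ .image_id = BL33_IMAGE_ID,
+ *		  .next_handoff_image_id = INVALID_IMAGE_ID, ... },
+ *	};
+ *	REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
+ */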
+
+/* BL image loading utility functions */
+void flush_bl_params_desc(void);
+int get_bl_params_node_index(unsigned int image_id);
+bl_mem_params_node_t *get_bl_mem_params_node(unsigned int image_id);
+bl_load_info_t *get_bl_load_info_from_mem_params_desc(void);
+bl_params_t *get_next_bl_params_from_mem_params_desc(void);
+
+
+#endif /* LOAD_IMAGE_V2 */
+#endif /* __DESC_IMAGE_LOAD_H__ */
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
index 6653cd1..aba15df 100644
--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -191,6 +191,7 @@
/* NASCR definitions */
#define NSASEDIS_BIT (1 << 15)
+#define NSTRCDIS_BIT (1 << 20)
#define NASCR_CP11_BIT (1 << 11)
#define NASCR_CP10_BIT (1 << 10)
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
index ddf660b..3b4349c 100644
--- a/include/lib/aarch32/arch_helpers.h
+++ b/include/lib/aarch32/arch_helpers.h
@@ -187,6 +187,9 @@
void clean_dcache_range(uintptr_t addr, size_t size);
void inv_dcache_range(uintptr_t addr, size_t size);
+void disable_mmu_secure(void);
+void disable_mmu_icache_secure(void);
+
DEFINE_SYSOP_FUNC(wfi)
DEFINE_SYSOP_FUNC(wfe)
DEFINE_SYSOP_FUNC(sev)
@@ -196,6 +199,9 @@
DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
DEFINE_SYSOP_FUNC(isb)
+void __dead2 smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
+ uint32_t r4, uint32_t r5, uint32_t r6, uint32_t r7);
+
DEFINE_SYSREG_RW_FUNCS(spsr)
DEFINE_SYSREG_RW_FUNCS(cpsr)
@@ -289,4 +295,6 @@
#define read_cntpct_el0() read64_cntpct()
+#define read_ctr_el0() read_ctr()
+
#endif /* __ARCH_HELPERS_H__ */
diff --git a/include/lib/cpus/aarch32/cortex_a32.h b/include/lib/cpus/aarch32/cortex_a32.h
new file mode 100644
index 0000000..458b41f
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a32.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CORTEX_A32_H__
+#define __CORTEX_A32_H__
+
+/* Cortex-A32 Main ID register for revision 0 */
+#define CORTEX_A32_MIDR 0x410FD010
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ * CPUECTLR_EL1 is an implementation-specific register.
+ ******************************************************************************/
+#define CORTEX_A32_CPUECTLR_EL1 p15, 1, c15
+#define CORTEX_A32_CPUECTLR_SMPEN_BIT (1 << 6)
+
+#endif /* __CORTEX_A32_H__ */
diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S
index f58f3e9..2b9947e 100644
--- a/include/lib/cpus/aarch32/cpu_macros.S
+++ b/include/lib/cpus/aarch32/cpu_macros.S
@@ -42,12 +42,16 @@
CPU_MIDR: /* cpu_ops midr */
.space 4
/* Reset fn is needed during reset */
+#if IMAGE_BL1 || IMAGE_BL32
CPU_RESET_FUNC: /* cpu_ops reset_func */
.space 4
+#endif
+#if IMAGE_BL32	/* The core/cluster power down functions are needed only in BL32 */
CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */
.space 4
CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
.space 4
+#endif
CPU_OPS_SIZE = .
/*
@@ -60,13 +64,17 @@
.align 2
.type cpu_ops_\_name, %object
.word \_midr
+#if IMAGE_BL1 || IMAGE_BL32
.if \_noresetfunc
.word 0
.else
.word \_name\()_reset_func
.endif
+#endif
+#if IMAGE_BL32
.word \_name\()_core_pwr_dwn
.word \_name\()_cluster_pwr_dwn
+#endif
.endm
#endif /* __CPU_MACROS_S__ */
diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h
index b264fc3..676973c 100644
--- a/include/lib/el3_runtime/context_mgmt.h
+++ b/include/lib/el3_runtime/context_mgmt.h
@@ -103,5 +103,9 @@
"msr spsel, #0\n"
: : "r" (context));
}
+
+#else
+void *cm_get_next_context(void);
#endif /* AARCH32 */
+
#endif /* __CM_H__ */
diff --git a/include/lib/psci/psci.h b/include/lib/psci/psci.h
index 02cbbf3..34de4c2 100644
--- a/include/lib/psci/psci.h
+++ b/include/lib/psci/psci.h
@@ -32,6 +32,7 @@
#define __PSCI_H__
#include <bakery_lock.h>
+#include <bl_common.h>
#include <platform_def.h> /* for PLAT_NUM_PWR_DOMAINS */
#if ENABLE_PLAT_COMPAT
#include <psci_compat.h>
@@ -356,10 +357,45 @@
*/
void psci_entrypoint(void) __deprecated;
-/*******************************************************************************
- * Forward declarations
- ******************************************************************************/
-struct entry_point_info;
+/*
+ * Function prototype for the warm boot entrypoint function that the
+ * platform will program into its mailbox.
+ */
+typedef void (*mailbox_entrypoint_t)(void);
+
+/******************************************************************************
+ * Structure to pass PSCI Library arguments.
+ *****************************************************************************/
+typedef struct psci_lib_args {
+ /* The version information of PSCI Library Interface */
+ param_header_t h;
+ /* The warm boot entrypoint function */
+ mailbox_entrypoint_t mailbox_ep;
+} psci_lib_args_t;
+
+/* Helper macro to set the psci_lib_args_t structure at runtime */
+#define SET_PSCI_LIB_ARGS_V1(_p, _entry) do { \
+ SET_PARAM_HEAD(_p, PARAM_PSCI_LIB_ARGS, VERSION_1, 0); \
+ (_p)->mailbox_ep = (_entry); \
+ } while (0)
+
+/* Helper macro to define the psci_lib_args_t statically */
+#define DEFINE_STATIC_PSCI_LIB_ARGS_V1(_name, _entry) \
+ static const psci_lib_args_t (_name) = { \
+ .h.type = (uint8_t)PARAM_PSCI_LIB_ARGS, \
+ .h.version = (uint8_t)VERSION_1, \
+ .h.size = (uint16_t)sizeof(_name), \
+ .h.attr = 0, \
+ .mailbox_ep = (_entry) \
+ }
+
+/* Helper macro to verify a pointer to a psci_lib_args_t structure */
+#define VERIFY_PSCI_LIB_ARGS_V1(_p) ((_p) \
+ && ((_p)->h.type == PARAM_PSCI_LIB_ARGS) \
+ && ((_p)->h.version == VERSION_1) \
+ && ((_p)->h.size == sizeof(*(_p))) \
+ && ((_p)->h.attr == 0) \
+ && ((_p)->mailbox_ep))
/******************************************************************************
* PSCI Library Interfaces
@@ -372,11 +408,11 @@
void *cookie,
void *handle,
u_register_t flags);
-int psci_setup(uintptr_t mailbox_ep);
+int psci_setup(const psci_lib_args_t *lib_args);
void psci_warmboot_entrypoint(void);
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
void psci_prepare_next_non_secure_ctx(
- struct entry_point_info *next_image_info);
+ entry_point_info_t *next_image_info);
#endif /*__ASSEMBLY__*/
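
Note: as a hedged illustration of the new PSCI library argument helpers
(not code from this patch), an EL3 runtime could build and validate the
arguments at run time roughly as follows; `my_warm_boot_ep` is a
hypothetical entrypoint symbol introduced only for this sketch:

#include <assert.h>
#include <psci.h>

void my_warm_boot_ep(void);	/* hypothetical warm boot entrypoint */

static void example_psci_lib_init(void)
{
	psci_lib_args_t args;

	/* Populate the parameter header and the mailbox entrypoint */
	SET_PSCI_LIB_ARGS_V1(&args, my_warm_boot_ep);

	/* psci_setup() performs exactly this check on entry */
	assert(VERIFY_PSCI_LIB_ARGS_V1(&args));

	psci_setup(&args);
}
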
diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h
index d2e8729..29fcffe 100644
--- a/include/plat/arm/common/plat_arm.h
+++ b/include/plat/arm/common/plat_arm.h
@@ -42,6 +42,7 @@
******************************************************************************/
struct bl31_params;
struct meminfo;
+struct image_info;
#define ARM_CASSERT_MMAP \
CASSERT((ARRAY_SIZE(plat_arm_mmap) + ARM_BL_REGIONS) \
@@ -164,8 +165,13 @@
void arm_bl2u_plat_arch_setup(void);
/* BL31 utility functions */
+#if LOAD_IMAGE_V2
+void arm_bl31_early_platform_setup(void *from_bl2,
+ void *plat_params_from_bl2);
+#else
void arm_bl31_early_platform_setup(struct bl31_params *from_bl2,
void *plat_params_from_bl2);
+#endif /* LOAD_IMAGE_V2 */
void arm_bl31_platform_setup(void);
void arm_bl31_plat_runtime_setup(void);
void arm_bl31_plat_arch_setup(void);
@@ -174,7 +180,8 @@
void arm_tsp_early_platform_setup(void);
/* SP_MIN utility functions */
-void arm_sp_min_early_platform_setup(void);
+void arm_sp_min_early_platform_setup(void *from_bl2,
+ void *plat_params_from_bl2);
/* FIP TOC validity check */
int arm_io_is_toc_valid(void);
@@ -194,6 +201,14 @@
void plat_arm_interconnect_enter_coherency(void);
void plat_arm_interconnect_exit_coherency(void);
+#if LOAD_IMAGE_V2
+/*
+ * This function is called after the SCP_BL2 image has been loaded and performs
+ * any platform-specific actions required to handle the SCP firmware.
+ */
+int plat_arm_bl2_handle_scp_bl2(struct image_info *scp_bl2_image_info);
+#endif
+
/*
* Optional functions required in ARM standard platforms
*/
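
Note: the sketch below shows one possible implementation of the new hook
for a hypothetical platform whose SCP fetches the firmware by itself from
the shared load address; it is illustrative only. ARM CSS platforms
instead push the image over the SCP download protocol, as shown later in
this patch (plat/arm/css/common/css_bl2_setup.c).

#include <arch_helpers.h>
#include <bl_common.h>
#include <plat_arm.h>

/* Hedged sketch for a hypothetical platform; not part of this patch */
int plat_arm_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info)
{
	/* Make the loaded image visible to the SCP and report success */
	flush_dcache_range(scp_bl2_image_info->image_base,
			   scp_bl2_image_info->image_size);
	return 0;
}
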
diff --git a/include/plat/common/common_def.h b/include/plat/common/common_def.h
index d6b7772..e2c4513 100644
--- a/include/plat/common/common_def.h
+++ b/include/plat/common/common_def.h
@@ -41,9 +41,13 @@
/*
* Platform binary types for linking
*/
+#ifdef AARCH32
+#define PLATFORM_LINKER_FORMAT "elf32-littlearm"
+#define PLATFORM_LINKER_ARCH arm
+#else
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
-
+#endif /* AARCH32 */
/*
* Generic platform constants
@@ -70,6 +74,18 @@
#define MAKE_ULL(x) x
#endif
+#if LOAD_IMAGE_V2
+#define BL2_IMAGE_DESC { \
+ .image_id = BL2_IMAGE_ID, \
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, \
+ VERSION_2, image_info_t, 0), \
+ .image_info.image_base = BL2_BASE, \
+ .image_info.image_max_size = BL2_LIMIT - BL2_BASE,\
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, \
+ VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),\
+ .ep_info.pc = BL2_BASE, \
+}
+#else /* LOAD_IMAGE_V2 */
#define BL2_IMAGE_DESC { \
.image_id = BL2_IMAGE_ID, \
SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, \
@@ -79,6 +95,7 @@
VERSION_1, entry_point_info_t, SECURE | EXECUTABLE),\
.ep_info.pc = BL2_BASE, \
}
+#endif /* LOAD_IMAGE_V2 */
/*
* The following constants identify the extents of the code & read-only data
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
index 1d2a373..5b4d11d 100644
--- a/include/plat/common/platform.h
+++ b/include/plat/common/platform.h
@@ -44,6 +44,8 @@
struct entry_point_info;
struct bl31_params;
struct image_desc;
+struct bl_load_info;
+struct bl_params;
/*******************************************************************************
* plat_get_rotpk_info() flags
@@ -84,7 +86,7 @@
* Optional common functions (may be overridden)
******************************************************************************/
uintptr_t plat_get_my_stack(void);
-void plat_report_exception(unsigned long);
+void plat_report_exception(unsigned int exception_type);
int plat_crash_console_init(void);
int plat_crash_console_putc(int c);
void plat_error_handler(int err) __dead2;
@@ -138,6 +140,15 @@
void bl2_platform_setup(void);
struct meminfo *bl2_plat_sec_mem_layout(void);
+#if LOAD_IMAGE_V2
+/*
+ * This function can be used by the platform to update or use the image
+ * information for the given `image_id`.
+ */
+int bl2_plat_handle_post_image_load(unsigned int image_id);
+
+#else /* LOAD_IMAGE_V2 */
+
/*
* This function returns a pointer to the shared memory that the platform has
* kept aside to pass trusted firmware related information that BL31
@@ -194,6 +205,8 @@
/* Gets the memory layout for BL32 */
void bl2_plat_get_bl32_meminfo(struct meminfo *mem_info);
+#endif /* LOAD_IMAGE_V2 */
+
/*******************************************************************************
* Optional BL2 functions (may be overridden)
******************************************************************************/
@@ -218,8 +231,13 @@
/*******************************************************************************
* Mandatory BL31 functions
******************************************************************************/
+#if LOAD_IMAGE_V2
+void bl31_early_platform_setup(void *from_bl2,
+ void *plat_params_from_bl2);
+#else
void bl31_early_platform_setup(struct bl31_params *from_bl2,
void *plat_params_from_bl2);
+#endif
void bl31_plat_arch_setup(void);
void bl31_platform_setup(void);
void bl31_plat_runtime_setup(void);
@@ -257,6 +275,31 @@
int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr);
int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr);
+#if LOAD_IMAGE_V2
+/*******************************************************************************
+ * Mandatory BL image load functions (may be overridden).
+ ******************************************************************************/
+/*
+ * This function returns a pointer to the list of images that the
+ * platform has populated for loading.
+ */
+struct bl_load_info *plat_get_bl_image_load_info(void);
+
+/*
+ * This function returns a pointer to the shared memory that the
+ * platform has kept aside to pass trusted firmware related
+ * information that the next BL image could need.
+ */
+struct bl_params *plat_get_next_bl_params(void);
+
+/*
+ * This function flushes to main memory all the params that are
+ * passed to the next image.
+ */
+void plat_flush_next_bl_params(void);
+
+#endif /* LOAD_IMAGE_V2 */
+
#if ENABLE_PLAT_COMPAT
/*
* The below declarations are to enable compatibility for the platform ports
diff --git a/include/services/std_svc.h b/include/services/std_svc.h
index 0feb2ea..38ce8bb 100644
--- a/include/services/std_svc.h
+++ b/include/services/std_svc.h
@@ -42,4 +42,13 @@
#define STD_SVC_VERSION_MAJOR 0x0
#define STD_SVC_VERSION_MINOR 0x1
+/*
+ * Get the ARM Standard Service argument from the EL3 Runtime.
+ * This function must be implemented by the EL3 Runtime; `svc_mask`
+ * is a bit mask identifying the range of SMC function IDs
+ * available to the service.
+ */
+uintptr_t get_arm_std_svc_args(unsigned int svc_mask);
+
#endif /* __STD_SVC_H__ */
diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S
index 63ac1a7..fd7c6dd 100644
--- a/lib/aarch32/misc_helpers.S
+++ b/lib/aarch32/misc_helpers.S
@@ -32,7 +32,21 @@
#include <asm_macros.S>
#include <assert_macros.S>
+ .globl smc
.globl zeromem
+ .globl disable_mmu_icache_secure
+ .globl disable_mmu_secure
+
+func smc
+ /*
+ * On AArch32, only the first four arguments are passed in r0-r3;
+ * the remaining ones (r4-r6) are passed on the stack, so load
+ * them from the stack into registers r4-r6 explicitly here.
+ * Clobbers: r4-r6
+ */
+ ldm sp, {r4, r5, r6}
+ smc #0
+endfunc smc
/* -----------------------------------------------------------------------
* void zeromem(void *mem, unsigned int length);
@@ -58,3 +72,25 @@
z_end:
bx lr
endfunc zeromem
+
+/* ---------------------------------------------------------------------------
+ * Disable the MMU in Secure State
+ * ---------------------------------------------------------------------------
+ */
+
+func disable_mmu_secure
+ mov r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mmu:
+ ldcopr r0, SCTLR
+ bic r0, r0, r1
+ stcopr r0, SCTLR
+ isb // ensure MMU is off
+ dsb sy
+ bx lr
+endfunc disable_mmu_secure
+
+
+func disable_mmu_icache_secure
+ ldr r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+ b do_disable_mmu
+endfunc disable_mmu_icache_secure
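
Note: the comment in the smc helper above implies a seven-argument,
C-callable function. The prototype and the wrapper below are assumptions
made for illustration; they are not declarations added by this hunk.

#include <stdint.h>

/* Assumed prototype: r0-r3 arrive in registers, r4-r6 are read back from
 * the caller's stack by the assembly helper. */
void smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
	 uint32_t r4, uint32_t r5, uint32_t r6);

/* The helper has no 'bx lr' after 'smc #0', so it is only suitable for
 * SMCs that hand control off for good and never return to the caller. */
static void example_one_way_smc(uint32_t fid, uint32_t arg)
{
	smc(fid, arg, 0, 0, 0, 0, 0);
}
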
diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S
new file mode 100644
index 0000000..b51f997
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a32.S
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a32.h>
+#include <cpu_macros.S>
+
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * Clobbers: r0-r1
+ * ---------------------------------------------
+ */
+func cortex_a32_disable_smp
+ ldcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1
+ bic r0, r0, #CORTEX_A32_CPUECTLR_SMPEN_BIT
+ stcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a32_disable_smp
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A32.
+ * Clobbers: r0-r1
+ * -------------------------------------------------
+ */
+func cortex_a32_reset_func
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ ldcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1
+ orr r0, r0, #CORTEX_A32_CPUECTLR_SMPEN_BIT
+ stcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1
+ isb
+ bx lr
+endfunc cortex_a32_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A32.
+ * Clobbers: r0-r3
+ * ----------------------------------------------------
+ */
+func cortex_a32_core_pwr_dwn
+ push {lr}
+
+ /* Assert that the data cache is disabled */
+#if ASM_ASSERTION
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ pop {lr}
+ b cortex_a32_disable_smp
+endfunc cortex_a32_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A32.
+ * Clobbers: r0-r3
+ * -------------------------------------------------------
+ */
+func cortex_a32_cluster_pwr_dwn
+ push {lr}
+
+ /* Assert that the data cache is disabled */
+#if ASM_ASSERTION
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Flush L1 cache.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 cache.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ pop {lr}
+ b cortex_a32_disable_smp
+endfunc cortex_a32_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a32, CORTEX_A32_MIDR
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
index 927a6f5..042ffbd 100644
--- a/lib/cpus/aarch32/cpu_helpers.S
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -34,6 +34,7 @@
#include <cpu_data.h>
#include <cpu_macros.S>
+#if IMAGE_BL1 || IMAGE_BL32
/*
* The reset handler common to all platforms. After a matching
* cpu_ops structure entry is found, the correponding reset_handler
@@ -65,6 +66,9 @@
bx lr
endfunc reset_handler
+#endif /* IMAGE_BL1 || IMAGE_BL32 */
+
+#if IMAGE_BL32 /* The core and cluster power down functions are needed only in BL32 */
/*
* The prepare core power down function for all platforms. After
* the cpu_ops pointer is retrieved from cpu_data, the corresponding
@@ -132,6 +136,8 @@
pop {r4 - r6, pc}
endfunc init_cpu_ops
+#endif /* IMAGE_BL32 */
+
/*
* The below function returns the cpu_ops structure matching the
* midr of the core. It reads the MIDR and finds the matching
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
index 263ab68..cb8b77d 100644
--- a/lib/psci/psci_setup.c
+++ b/lib/psci/psci_setup.c
@@ -206,10 +206,12 @@
* | CPU 0 | CPU 1 | CPU 2 | CPU 3 |
* ------------------------------------------------
******************************************************************************/
-int psci_setup(uintptr_t mailbox_ep)
+int psci_setup(const psci_lib_args_t *lib_args)
{
const unsigned char *topology_tree;
+ assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args));
+
/* Do the Architectural initialization */
psci_arch_setup();
@@ -234,8 +236,7 @@
*/
psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
- assert(mailbox_ep);
- plat_setup_psci_ops(mailbox_ep, &psci_plat_pm_ops);
+ plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep, &psci_plat_pm_ops);
assert(psci_plat_pm_ops);
/*
diff --git a/lib/semihosting/aarch32/semihosting_call.S b/lib/semihosting/aarch32/semihosting_call.S
new file mode 100644
index 0000000..0cc707a
--- /dev/null
+++ b/lib/semihosting/aarch32/semihosting_call.S
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+
+ .globl semihosting_call
+
+func semihosting_call
+ svc #0x123456
+ bx lr
+endfunc semihosting_call
diff --git a/plat/arm/board/common/aarch32/board_arm_helpers.S b/plat/arm/board/common/aarch32/board_arm_helpers.S
new file mode 100644
index 0000000..893267b
--- /dev/null
+++ b/plat/arm/board/common/aarch32/board_arm_helpers.S
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <v2m_def.h>
+
+ .globl plat_report_exception
+
+
+ /* -------------------------------------------------------
+ * void plat_report_exception(unsigned int type)
+ * Function to report an unhandled exception
+ * via platform-specific means.
+ * On the FVP platform, it updates the LEDs
+ * to indicate where we are.
+ * SYS_LED[0] - 0x0
+ * SYS_LED[2:1] - 0x0
+ * SYS_LED[7:3] - Exception Mode.
+ * Clobbers: r0-r1
+ * -------------------------------------------------------
+ */
+func plat_report_exception
+ lsl r0, r0, #V2M_SYS_LED_EC_SHIFT
+ ldr r1, =V2M_SYSREGS_BASE
+ add r1, r1, #V2M_SYS_LED
+ str r0, [r1]
+ bx lr
+endfunc plat_report_exception
diff --git a/plat/arm/board/common/board_common.mk b/plat/arm/board/common/board_common.mk
index 49136e6..a6d4ce7 100644
--- a/plat/arm/board/common/board_common.mk
+++ b/plat/arm/board/common/board_common.mk
@@ -31,10 +31,8 @@
PLAT_INCLUDES += -Iinclude/plat/arm/board/common/ \
-Iinclude/plat/arm/board/common/drivers
-PLAT_BL_COMMON_SOURCES += drivers/arm/pl011/${ARCH}/pl011_console.S
-ifeq (${ARCH}, aarch64)
-PLAT_BL_COMMON_SOURCES += plat/arm/board/common/aarch64/board_arm_helpers.S
-endif
+PLAT_BL_COMMON_SOURCES += drivers/arm/pl011/${ARCH}/pl011_console.S \
+ plat/arm/board/common/${ARCH}/board_arm_helpers.S
BL1_SOURCES += plat/arm/board/common/drivers/norflash/norflash.c
diff --git a/plat/arm/board/fvp/aarch32/fvp_helpers.S b/plat/arm/board/fvp/aarch32/fvp_helpers.S
index 373036c..4c750cb 100644
--- a/plat/arm/board/fvp/aarch32/fvp_helpers.S
+++ b/plat/arm/board/fvp/aarch32/fvp_helpers.S
@@ -34,9 +34,22 @@
#include "../drivers/pwrc/fvp_pwrc.h"
#include "../fvp_def.h"
+ .globl plat_secondary_cold_boot_setup
.globl plat_get_my_entrypoint
.globl plat_is_my_cpu_primary
+ /* --------------------------------------------------------------------
+ * void plat_secondary_cold_boot_setup (void);
+ *
+ * For AArch32, cold-booting secondary CPUs is not yet
+ * implemented and they panic.
+ * --------------------------------------------------------------------
+ */
+func plat_secondary_cold_boot_setup
+cb_panic:
+ b cb_panic
+endfunc plat_secondary_cold_boot_setup
+
/* ---------------------------------------------------------------------
* unsigned long plat_get_my_entrypoint (void);
*
diff --git a/plat/arm/board/fvp/fvp_bl31_setup.c b/plat/arm/board/fvp/fvp_bl31_setup.c
index 2ee3ba5..f16d6f0 100644
--- a/plat/arm/board/fvp/fvp_bl31_setup.c
+++ b/plat/arm/board/fvp/fvp_bl31_setup.c
@@ -31,9 +31,13 @@
#include <plat_arm.h>
#include "fvp_private.h"
-
+#if LOAD_IMAGE_V2
+void bl31_early_platform_setup(void *from_bl2,
+ void *plat_params_from_bl2)
+#else
void bl31_early_platform_setup(bl31_params_t *from_bl2,
void *plat_params_from_bl2)
+#endif
{
arm_bl31_early_platform_setup(from_bl2, plat_params_from_bl2);
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 2865569..9b827a6 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -109,12 +109,14 @@
lib/cpus/aarch64/cortex_a57.S \
lib/cpus/aarch64/cortex_a72.S \
lib/cpus/aarch64/cortex_a73.S
+else
+FVP_CPU_LIBS += lib/cpus/aarch32/cortex_a32.S
endif
BL1_SOURCES += drivers/io/io_semihosting.c \
lib/semihosting/semihosting.c \
- lib/semihosting/aarch64/semihosting_call.S \
- plat/arm/board/fvp/aarch64/fvp_helpers.S \
+ lib/semihosting/${ARCH}/semihosting_call.S \
+ plat/arm/board/fvp/${ARCH}/fvp_helpers.S \
plat/arm/board/fvp/fvp_bl1_setup.c \
plat/arm/board/fvp/fvp_err.c \
plat/arm/board/fvp/fvp_io_storage.c \
@@ -126,7 +128,7 @@
BL2_SOURCES += drivers/io/io_semihosting.c \
drivers/delay_timer/delay_timer.c \
lib/semihosting/semihosting.c \
- lib/semihosting/aarch64/semihosting_call.S \
+ lib/semihosting/${ARCH}/semihosting_call.S \
plat/arm/board/fvp/fvp_bl2_setup.c \
plat/arm/board/fvp/fvp_err.c \
plat/arm/board/fvp/fvp_io_storage.c \
diff --git a/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c b/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c
index d3bef82..735c4f0 100644
--- a/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c
+++ b/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c
@@ -31,9 +31,10 @@
#include <plat_arm.h>
#include "../fvp_private.h"
-void sp_min_early_platform_setup(void)
+void sp_min_early_platform_setup(void *from_bl2,
+ void *plat_params_from_bl2)
{
- arm_sp_min_early_platform_setup();
+ arm_sp_min_early_platform_setup(from_bl2, plat_params_from_bl2);
/* Initialize the platform config for future decision making */
fvp_config_setup();
diff --git a/plat/arm/board/juno/include/platform_def.h b/plat/arm/board/juno/include/platform_def.h
index c53e938..691e2f7 100644
--- a/plat/arm/board/juno/include/platform_def.h
+++ b/plat/arm/board/juno/include/platform_def.h
@@ -185,6 +185,12 @@
#define PLAT_CSS_PRIMARY_CPU_BIT_WIDTH 4
/*
+ * PLAT_CSS_MAX_SCP_BL2_SIZE is calculated using the current
+ * SCP_BL2 size plus a little space for growth.
+ */
+#define PLAT_CSS_MAX_SCP_BL2_SIZE 0x1D000
+
+/*
* Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
* terminology. On a GICv2 system or mode, the lists will be merged and treated
* as Group 0 interrupts.
diff --git a/plat/arm/common/aarch32/arm_bl2_mem_params_desc.c b/plat/arm/common/aarch32/arm_bl2_mem_params_desc.c
new file mode 100644
index 0000000..9c92e5e
--- /dev/null
+++ b/plat/arm/common/aarch32/arm_bl2_mem_params_desc.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <bl_common.h>
+#include <desc_image_load.h>
+#include <platform.h>
+#include <platform_def.h>
+
+
+/*******************************************************************************
+ * The following descriptor provides the BL image/ep information that is
+ * used by BL2 to load the images; a subset of this information is also
+ * passed to the next BL image. The image loading sequence is managed by
+ * populating the images in the required loading order. The image execution
+ * sequence is managed by populating the `next_handoff_image_id` with the
+ * id of the next executable image.
+ ******************************************************************************/
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+#ifdef SCP_BL2_BASE
+ /* Fill SCP_BL2 related information if it exists */
+ {
+ .image_id = SCP_BL2_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+ VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = SCP_BL2_BASE,
+ .image_info.image_max_size = PLAT_CSS_MAX_SCP_BL2_SIZE,
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+#endif /* SCP_BL2_BASE */
+
+ /* Fill BL32 related information */
+ {
+ .image_id = BL32_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t,
+ SECURE | EXECUTABLE | EP_FIRST_EXE),
+ .ep_info.pc = BL32_BASE,
+ .ep_info.spsr = SPSR_MODE32(MODE32_mon, SPSR_T_ARM,
+ SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
+ .image_info.image_base = BL32_BASE,
+ .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+ .next_handoff_image_id = BL33_IMAGE_ID,
+ },
+
+ /* Fill BL33 related information */
+ {
+ .image_id = BL33_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
+#ifdef PRELOADED_BL33_BASE
+ .ep_info.pc = PRELOADED_BL33_BASE,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+#else
+ .ep_info.pc = PLAT_ARM_NS_IMAGE_OFFSET,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = PLAT_ARM_NS_IMAGE_OFFSET,
+ .image_info.image_max_size = ARM_DRAM1_SIZE,
+#endif /* PRELOADED_BL33_BASE */
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ }
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
diff --git a/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c b/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c
new file mode 100644
index 0000000..64315f7
--- /dev/null
+++ b/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <bl_common.h>
+#include <desc_image_load.h>
+#include <platform.h>
+#include <platform_def.h>
+
+
+/*******************************************************************************
+ * The following descriptor provides the BL image/ep information that is
+ * used by BL2 to load the images; a subset of this information is also
+ * passed to the next BL image. The image loading sequence is managed by
+ * populating the images in the required loading order. The image execution
+ * sequence is managed by populating the `next_handoff_image_id` with the
+ * id of the next executable image.
+ ******************************************************************************/
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+#ifdef SCP_BL2_BASE
+ /* Fill SCP_BL2 related information if it exists */
+ {
+ .image_id = SCP_BL2_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+ VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = SCP_BL2_BASE,
+ .image_info.image_max_size = PLAT_CSS_MAX_SCP_BL2_SIZE,
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+#endif /* SCP_BL2_BASE */
+
+#ifdef EL3_PAYLOAD_BASE
+ /* Fill EL3 payload related information (BL31 is the EL3 payload) */
+ {
+ .image_id = BL31_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t,
+ SECURE | EXECUTABLE | EP_FIRST_EXE),
+ .ep_info.pc = EL3_PAYLOAD_BASE,
+ .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t,
+ IMAGE_ATTRIB_PLAT_SETUP | IMAGE_ATTRIB_SKIP_LOADING),
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+
+#else /* EL3_PAYLOAD_BASE */
+
+ /* Fill BL31 related information */
+ {
+ .image_id = BL31_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t,
+ SECURE | EXECUTABLE | EP_FIRST_EXE),
+ .ep_info.pc = BL31_BASE,
+ .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS),
+#if DEBUG
+ .ep_info.args.arg1 = ARM_BL31_PLAT_PARAM_VAL,
+#endif
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
+ .image_info.image_base = BL31_BASE,
+ .image_info.image_max_size = BL31_LIMIT - BL31_BASE,
+
+# ifdef BL32_BASE
+ .next_handoff_image_id = BL32_IMAGE_ID,
+# else
+ .next_handoff_image_id = BL33_IMAGE_ID,
+# endif
+ },
+
+# ifdef BL32_BASE
+ /* Fill BL32 related information */
+ {
+ .image_id = BL32_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),
+ .ep_info.pc = BL32_BASE,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = BL32_BASE,
+ .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+ .next_handoff_image_id = BL33_IMAGE_ID,
+ },
+# endif /* BL32_BASE */
+
+ /* Fill BL33 related information */
+ {
+ .image_id = BL33_IMAGE_ID,
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
+# ifdef PRELOADED_BL33_BASE
+ .ep_info.pc = PRELOADED_BL33_BASE,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+# else
+ .ep_info.pc = PLAT_ARM_NS_IMAGE_OFFSET,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = PLAT_ARM_NS_IMAGE_OFFSET,
+ .image_info.image_max_size = ARM_DRAM1_SIZE,
+# endif /* PRELOADED_BL33_BASE */
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ }
+#endif /* EL3_PAYLOAD_BASE */
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
diff --git a/plat/arm/common/arm_bl1_setup.c b/plat/arm/common/arm_bl1_setup.c
index c94f0cd..50d102a 100644
--- a/plat/arm/common/arm_bl1_setup.c
+++ b/plat/arm/common/arm_bl1_setup.c
@@ -73,7 +73,6 @@
******************************************************************************/
void arm_bl1_early_platform_setup(void)
{
- const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE;
#if !ARM_DISABLE_TRUSTED_WDOG
/* Enable watchdog */
@@ -88,13 +87,15 @@
bl1_tzram_layout.total_base = ARM_BL_RAM_BASE;
bl1_tzram_layout.total_size = ARM_BL_RAM_SIZE;
+#if !LOAD_IMAGE_V2
/* Calculate how much RAM BL1 is using and how much remains free */
bl1_tzram_layout.free_base = ARM_BL_RAM_BASE;
bl1_tzram_layout.free_size = ARM_BL_RAM_SIZE;
reserve_mem(&bl1_tzram_layout.free_base,
&bl1_tzram_layout.free_size,
BL1_RAM_BASE,
- bl1_size);
+ BL1_RAM_LIMIT - BL1_RAM_BASE);
+#endif /* LOAD_IMAGE_V2 */
}
void bl1_early_platform_setup(void)
@@ -131,7 +132,11 @@
BL1_COHERENT_RAM_LIMIT
#endif
);
+#ifdef AARCH32
+ enable_mmu_secure(0);
+#else
enable_mmu_el3(0);
+#endif /* AARCH32 */
}
void bl1_plat_arch_setup(void)
diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c
index b6afaa7..a4fac0d 100644
--- a/plat/arm/common/arm_bl2_setup.c
+++ b/plat/arm/common/arm_bl2_setup.c
@@ -30,10 +30,13 @@
#include <arch_helpers.h>
#include <arm_def.h>
+#include <assert.h>
#include <bl_common.h>
#include <console.h>
-#include <platform_def.h>
+#include <debug.h>
+#include <desc_image_load.h>
#include <plat_arm.h>
+#include <platform_def.h>
#include <string.h>
#if USE_COHERENT_MEM
@@ -51,6 +54,17 @@
/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak bl2_early_platform_setup
+#pragma weak bl2_platform_setup
+#pragma weak bl2_plat_arch_setup
+#pragma weak bl2_plat_sec_mem_layout
+
+#if LOAD_IMAGE_V2
+
+#pragma weak bl2_plat_handle_post_image_load
+
+#else /* LOAD_IMAGE_V2 */
/*******************************************************************************
* This structure represents the superset of information that is passed to
@@ -72,10 +86,6 @@
/* Weak definitions may be overridden in specific ARM standard platform */
-#pragma weak bl2_early_platform_setup
-#pragma weak bl2_platform_setup
-#pragma weak bl2_plat_arch_setup
-#pragma weak bl2_plat_sec_mem_layout
#pragma weak bl2_plat_get_bl31_params
#pragma weak bl2_plat_get_bl31_ep_info
#pragma weak bl2_plat_flush_bl31_params
@@ -106,7 +116,7 @@
{
return &bl2_tzram_layout;
}
-#endif
+#endif /* ARM_BL31_IN_DRAM */
/*******************************************************************************
* This function assigns a pointer to the memory that the platform has kept
@@ -180,6 +190,7 @@
return &bl31_params_mem.bl31_ep_info;
}
+#endif /* LOAD_IMAGE_V2 */
/*******************************************************************************
* BL1 has passed the extents of the trusted SRAM that should be visible to BL2
@@ -235,7 +246,12 @@
BL2_COHERENT_RAM_LIMIT
#endif
);
+
+#ifdef AARCH32
+ enable_mmu_secure(0);
+#else
enable_mmu_el1(0);
+#endif
}
void bl2_plat_arch_setup(void)
@@ -243,6 +259,46 @@
arm_bl2_plat_arch_setup();
}
+#if LOAD_IMAGE_V2
+/*******************************************************************************
+ * This function can be used by the platform to update or use the image
+ * information for the given `image_id`.
+ ******************************************************************************/
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+ int err = 0;
+ bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+ assert(bl_mem_params);
+
+ switch (image_id) {
+#ifdef AARCH64
+ case BL32_IMAGE_ID:
+ bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl32_entry();
+ break;
+#endif
+
+ case BL33_IMAGE_ID:
+ /* BL33 expects to receive the primary CPU MPID (through r0) */
+ bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
+ bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl33_entry();
+ break;
+
+#ifdef SCP_BL2_BASE
+ case SCP_BL2_IMAGE_ID:
+ /* The subsequent handling of SCP_BL2 is platform specific */
+ err = plat_arm_bl2_handle_scp_bl2(&bl_mem_params->image_info);
+ if (err) {
+ WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
+ }
+ break;
+#endif
+ }
+
+ return err;
+}
+
+#else /* LOAD_IMAGE_V2 */
+
/*******************************************************************************
* Populate the extents of memory available for loading SCP_BL2 (if used),
* i.e. anywhere in trusted RAM as long as it doesn't overwrite BL2.
@@ -321,3 +377,5 @@
bl33_meminfo->free_base = ARM_NS_DRAM1_BASE;
bl33_meminfo->free_size = ARM_NS_DRAM1_SIZE;
}
+
+#endif /* LOAD_IMAGE_V2 */
diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c
index 4ed2477..bc1ec11 100644
--- a/plat/arm/common/arm_bl31_setup.c
+++ b/plat/arm/common/arm_bl31_setup.c
@@ -34,6 +34,7 @@
#include <assert.h>
#include <bl_common.h>
#include <console.h>
+#include <debug.h>
#include <mmio.h>
#include <plat_arm.h>
#include <platform.h>
@@ -98,8 +99,13 @@
* while creating page tables. BL2 has flushed this information to memory, so
* we are guaranteed to pick up good data.
******************************************************************************/
+#if LOAD_IMAGE_V2
+void arm_bl31_early_platform_setup(void *from_bl2,
+ void *plat_params_from_bl2)
+#else
void arm_bl31_early_platform_setup(bl31_params_t *from_bl2,
void *plat_params_from_bl2)
+#endif
{
/* Initialize the console to provide early debug support */
console_init(PLAT_ARM_BOOT_UART_BASE, PLAT_ARM_BOOT_UART_CLK_IN_HZ,
@@ -135,13 +141,8 @@
bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
-#else
- /*
- * Check params passed from BL2 should not be NULL,
- */
- assert(from_bl2 != NULL);
- assert(from_bl2->h.type == PARAM_BL31);
- assert(from_bl2->h.version >= VERSION_1);
+#else /* RESET_TO_BL31 */
+
/*
* In debug builds, we pass a special value in 'plat_params_from_bl2'
* to verify platform parameters from BL2 to BL31.
@@ -150,6 +151,43 @@
assert(((unsigned long long)plat_params_from_bl2) ==
ARM_BL31_PLAT_PARAM_VAL);
+# if LOAD_IMAGE_V2
+ /*
+ * The params passed from BL2 should not be NULL.
+ */
+ bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+ assert(params_from_bl2 != NULL);
+ assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
+ assert(params_from_bl2->h.version >= VERSION_2);
+
+ bl_params_node_t *bl_params = params_from_bl2->head;
+
+ /*
+ * Copy the BL33 and BL32 (if present) entry point information.
+ * They are stored in Secure RAM, in BL2's address space.
+ */
+ while (bl_params) {
+ if (bl_params->image_id == BL32_IMAGE_ID)
+ bl32_image_ep_info = *bl_params->ep_info;
+
+ if (bl_params->image_id == BL33_IMAGE_ID)
+ bl33_image_ep_info = *bl_params->ep_info;
+
+ bl_params = bl_params->next_params_info;
+ }
+
+ if (bl33_image_ep_info.pc == 0)
+ panic();
+
+# else /* LOAD_IMAGE_V2 */
+
+ /*
+ * Check params passed from BL2 should not be NULL,
+ */
+ assert(from_bl2 != NULL);
+ assert(from_bl2->h.type == PARAM_BL31);
+ assert(from_bl2->h.version >= VERSION_1);
+
/*
* Copy BL32 (if populated by BL2) and BL33 entry point information.
* They are stored in Secure RAM, in BL2's address space.
@@ -157,11 +195,18 @@
if (from_bl2->bl32_ep_info)
bl32_image_ep_info = *from_bl2->bl32_ep_info;
bl33_image_ep_info = *from_bl2->bl33_ep_info;
-#endif
+
+# endif /* LOAD_IMAGE_V2 */
+#endif /* RESET_TO_BL31 */
}
+#if LOAD_IMAGE_V2
+void bl31_early_platform_setup(void *from_bl2,
+ void *plat_params_from_bl2)
+#else
void bl31_early_platform_setup(bl31_params_t *from_bl2,
void *plat_params_from_bl2)
+#endif
{
arm_bl31_early_platform_setup(from_bl2, plat_params_from_bl2);
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
index 0b961ea..98d7219 100644
--- a/plat/arm/common/arm_common.mk
+++ b/plat/arm/common/arm_common.mk
@@ -116,7 +116,7 @@
drivers/io/io_storage.c \
plat/arm/common/arm_bl1_setup.c \
plat/arm/common/arm_io_storage.c \
- plat/common/aarch64/platform_up_stack.S
+ plat/common/${ARCH}/platform_up_stack.S
ifdef EL3_PAYLOAD_BASE
# Need the arm_program_trusted_mailbox() function to release secondary CPUs from
# their holding pen
@@ -128,7 +128,12 @@
drivers/io/io_storage.c \
plat/arm/common/arm_bl2_setup.c \
plat/arm/common/arm_io_storage.c \
- plat/common/aarch64/platform_up_stack.S
+ plat/common/${ARCH}/platform_up_stack.S
+ifeq (${LOAD_IMAGE_V2},1)
+BL2_SOURCES += plat/arm/common/${ARCH}/arm_bl2_mem_params_desc.c\
+ plat/arm/common/arm_image_load.c \
+ common/desc_image_load.c
+endif
BL2U_SOURCES += plat/arm/common/arm_bl2u_setup.c \
plat/common/aarch64/platform_up_stack.S
diff --git a/plat/arm/common/arm_image_load.c b/plat/arm/common/arm_image_load.c
new file mode 100644
index 0000000..cb6db77
--- /dev/null
+++ b/plat/arm/common/arm_image_load.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arm_def.h>
+#include <bl_common.h>
+#include <desc_image_load.h>
+#include <platform.h>
+
+
+#pragma weak plat_flush_next_bl_params
+#pragma weak plat_get_bl_image_load_info
+#pragma weak plat_get_next_bl_params
+
+
+/*******************************************************************************
+ * This function flushes the data structures so that they are visible
+ * in memory for the next BL image.
+ ******************************************************************************/
+void plat_flush_next_bl_params(void)
+{
+ flush_bl_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of loadable images.
+ ******************************************************************************/
+bl_load_info_t *plat_get_bl_image_load_info(void)
+{
+ return get_bl_load_info_from_mem_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of executable images.
+ ******************************************************************************/
+bl_params_t *plat_get_next_bl_params(void)
+{
+ return get_next_bl_params_from_mem_params_desc();
+}
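
Note: the sketch below is a conceptual illustration (not the actual
generic BL2 code) of how the three hooks above and
bl2_plat_handle_post_image_load() are expected to be driven under
LOAD_IMAGE_V2. The bl_load_info node field names and the V2
load_auth_image() signature are assumptions based on the interfaces this
series introduces.

#include <bl_common.h>
#include <debug.h>
#include <desc_image_load.h>
#include <platform.h>

static bl_params_t *example_bl2_load_flow(void)
{
	bl_load_info_t *load_info = plat_get_bl_image_load_info();
	bl_load_info_node_t *node;
	bl_params_t *next_params;

	for (node = load_info->head; node != NULL;
			node = node->next_load_info) {
		/* Load and authenticate unless the image is marked to skip */
		if ((node->image_info->h.attr &
				IMAGE_ATTRIB_SKIP_LOADING) == 0) {
			if (load_auth_image(node->image_id,
					node->image_info) != 0)
				panic();
		}

		/* Let the platform patch up ep/image info for this image */
		if (bl2_plat_handle_post_image_load(node->image_id) != 0)
			panic();
	}

	next_params = plat_get_next_bl_params();
	plat_flush_next_bl_params();
	return next_params;
}
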
diff --git a/plat/arm/common/sp_min/arm_sp_min_setup.c b/plat/arm/common/sp_min/arm_sp_min_setup.c
index 927f30f..d48556e 100644
--- a/plat/arm/common/sp_min/arm_sp_min_setup.c
+++ b/plat/arm/common/sp_min/arm_sp_min_setup.c
@@ -30,6 +30,7 @@
#include <assert.h>
#include <console.h>
+#include <debug.h>
#include <mmio.h>
#include <plat_arm.h>
#include <platform.h>
@@ -58,10 +59,6 @@
#pragma weak sp_min_platform_setup
#pragma weak sp_min_plat_arch_setup
-#ifndef RESET_TO_SP_MIN
-#error (" RESET_TO_SP_MIN flag is expected to be set.")
-#endif
-
/*******************************************************************************
* Return a pointer to the 'entry_point_info' structure of the next image for the
@@ -86,15 +83,20 @@
}
/*******************************************************************************
- * Perform early platform setup. We expect SP_MIN is the first boot loader
- * image and RESET_TO_SP_MIN build option to be set.
+ * Perform early platform setup.
******************************************************************************/
-void arm_sp_min_early_platform_setup(void)
+void arm_sp_min_early_platform_setup(void *from_bl2,
+ void *plat_params_from_bl2)
{
/* Initialize the console to provide early debug support */
console_init(PLAT_ARM_BOOT_UART_BASE, PLAT_ARM_BOOT_UART_CLK_IN_HZ,
ARM_CONSOLE_BAUDRATE);
+#if RESET_TO_SP_MIN
+ /* There are no parameters from BL2 if SP_MIN is a reset vector */
+ assert(from_bl2 == NULL);
+ assert(plat_params_from_bl2 == NULL);
+
/* Populate entry point information for BL33 */
SET_PARAM_HEAD(&bl33_image_ep_info,
PARAM_EP,
@@ -104,18 +106,46 @@
* Tell SP_MIN where the non-trusted software image
* is located and the entry state information
*/
-#ifdef PRELOADED_BL33_BASE
- bl33_image_ep_info.pc = PRELOADED_BL33_BASE;
-#else
bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
-#endif
bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+#else /* RESET_TO_SP_MIN */
+
+ /*
+ * The params passed from BL2 should not be NULL.
+ */
+ bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+ assert(params_from_bl2 != NULL);
+ assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
+ assert(params_from_bl2->h.version >= VERSION_2);
+
+ bl_params_node_t *bl_params = params_from_bl2->head;
+
+ /*
+ * Copy the BL33 entry point information.
+ * It is stored in Secure RAM, in BL2's address space.
+ */
+ while (bl_params) {
+ if (bl_params->image_id == BL33_IMAGE_ID) {
+ bl33_image_ep_info = *bl_params->ep_info;
+ break;
+ }
+
+ bl_params = bl_params->next_params_info;
+ }
+
+ if (bl33_image_ep_info.pc == 0)
+ panic();
+
+#endif /* RESET_TO_SP_MIN */
+
}
-void sp_min_early_platform_setup(void)
+void sp_min_early_platform_setup(void *from_bl2,
+ void *plat_params_from_bl2)
{
- arm_sp_min_early_platform_setup();
+ arm_sp_min_early_platform_setup(from_bl2, plat_params_from_bl2);
/*
* Initialize Interconnect for this cluster during cold boot.
@@ -146,10 +176,10 @@
/*
* Do initial security configuration to allow DRAM/device access
* (if earlier BL has not already done so).
- * TODO: If RESET_TO_SP_MIN is not set, the security setup needs
- * to be skipped.
*/
+#if RESET_TO_SP_MIN
plat_arm_security_setup();
+#endif
/* Enable and initialize the System level generic timer */
mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
diff --git a/plat/arm/css/common/css_bl2_setup.c b/plat/arm/css/common/css_bl2_setup.c
index 15db8d1..11ca342 100644
--- a/plat/arm/css/common/css_bl2_setup.c
+++ b/plat/arm/css/common/css_bl2_setup.c
@@ -37,13 +37,21 @@
#include "css_scp_bootloader.h"
/* Weak definition may be overridden in specific CSS based platform */
+#if LOAD_IMAGE_V2
+#pragma weak plat_arm_bl2_handle_scp_bl2
+#else
#pragma weak bl2_plat_handle_scp_bl2
+#endif
/*******************************************************************************
* Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol.
* Return 0 on success, -1 otherwise.
******************************************************************************/
+#if LOAD_IMAGE_V2
+int plat_arm_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info)
+#else
int bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
+#endif
{
int ret;
diff --git a/plat/common/aarch32/platform_helpers.S b/plat/common/aarch32/platform_helpers.S
index 481dd68..069d96d 100644
--- a/plat/common/aarch32/platform_helpers.S
+++ b/plat/common/aarch32/platform_helpers.S
@@ -33,6 +33,7 @@
.weak plat_my_core_pos
.weak plat_reset_handler
+ .weak plat_disable_acp
.weak platform_mem_init
.weak plat_panic_handler
@@ -59,6 +60,15 @@
bx lr
endfunc plat_reset_handler
+ /* -----------------------------------------------------
+ * Placeholder function which should be redefined by
+ * each platform.
+ * -----------------------------------------------------
+ */
+func plat_disable_acp
+ bx lr
+endfunc plat_disable_acp
+
/* ---------------------------------------------------------------------
* Placeholder function which should be redefined by
* each platform.
diff --git a/plat/common/aarch32/platform_up_stack.S b/plat/common/aarch32/platform_up_stack.S
new file mode 100644
index 0000000..8275aec
--- /dev/null
+++ b/plat/common/aarch32/platform_up_stack.S
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+ .globl plat_get_my_stack
+ .globl plat_set_my_stack
+
+ /* -----------------------------------------------------
+ * unsigned long plat_get_my_stack ()
+ *
+ * For cold-boot BL images, only the primary CPU needs
+ * a stack. This function returns the stack pointer for
+ * a stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func plat_get_my_stack
+ get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ bx lr
+endfunc plat_get_my_stack
+
+ /* -----------------------------------------------------
+ * void plat_set_my_stack ()
+ *
+ * For cold-boot BL images, only the primary CPU needs
+ * a stack. This function sets the stack pointer to a
+ * stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func plat_set_my_stack
+ get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ mov sp, r0
+ bx lr
+endfunc plat_set_my_stack
+
+ /* -----------------------------------------------------
+ * Per-cpu stacks in normal memory. Each cpu gets a
+ * stack of PLATFORM_STACK_SIZE bytes.
+ * -----------------------------------------------------
+ */
+declare_stack platform_normal_stacks, tzfw_normal_stacks, \
+ PLATFORM_STACK_SIZE, 1, CACHE_WRITEBACK_GRANULE
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 06647e0..e096601 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -28,6 +28,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <assert.h>
#include <debug.h>
#include <psci.h>
#include <runtime_svc.h>
@@ -41,6 +42,21 @@
0x108d905b, 0xf863, 0x47e8, 0xae, 0x2d,
0xc0, 0xfb, 0x56, 0x41, 0xf6, 0xe2);
+/* Set up the Standard Services */
+static int32_t std_svc_setup(void)
+{
+ uintptr_t svc_arg;
+
+ svc_arg = get_arm_std_svc_args(PSCI_FID_MASK);
+ assert(svc_arg);
+
+ /*
+ * PSCI is the only specification implemented as a Standard Service.
+ * `psci_setup()` also performs the EL3 architectural setup.
+ */
+ return psci_setup((const psci_lib_args_t *)svc_arg);
+}
+
/*
* Top-level Standard Service SMC handler. This handler will in turn dispatch
* calls to PSCI SMC handler
@@ -93,6 +109,6 @@
OEN_STD_START,
OEN_STD_END,
SMC_TYPE_FAST,
- NULL,
+ std_svc_setup,
std_svc_smc_handler
);
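
Note: get_arm_std_svc_args() itself is not defined in this hunk. As a
hedged sketch, an EL3 runtime (BL31 or SP_MIN) could satisfy it with the
new static helper macro; `runtime_warm_entrypoint` is a hypothetical
warm-boot entry symbol used only for this illustration.

#include <assert.h>
#include <psci.h>
#include <std_svc.h>
#include <stdint.h>

void runtime_warm_entrypoint(void);	/* hypothetical warm boot entry */

uintptr_t get_arm_std_svc_args(unsigned int svc_mask)
{
	/* Static PSCI library arguments, built with the new helper macro */
	DEFINE_STATIC_PSCI_LIB_ARGS_V1(psci_args, runtime_warm_entrypoint);

	/* PSCI is the only Standard Service expected to request arguments */
	assert(svc_mask == PSCI_FID_MASK);

	return (uintptr_t)&psci_args;
}
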