Add Test Secure Payload (BL3-2) image
This patch adds a simple TSP as the BL3-2 image. The secure payload
executes in S-EL1. It paves the way for the addition of the TSP
dispatcher runtime service to BL3-1. The TSP and the dispatcher service
will serve as an example of the runtime firmware's ability to toggle
execution between the non-secure and secure states in response to SMC
request from the non-secure state. The TSP will be replaced by a
Trusted OS in a real system.
The TSP also exports a set of handlers which should be called in
response to a PSCI power management event, e.g. a cpu being suspended or
turned off. For now it runs out of Secure DRAM on the ARM FVP port and
will be moved to Secure SRAM later. The default translation table setup
code assumes that the caller is executing out of secure SRAM. Hence the
TSP exports its own translation table setup function.
The TSP only services Fast SMCs, is non-reentrant and non-interruptible.
It does arithmetic operations on two sets of four operands, one set
supplied by the non-secure client, and the other supplied by the TSP
dispatcher in EL3. It returns the result according to the Secure Monitor
Calling convention standard.
This TSP has two functional entry points:
- An initial, one-time entry point through which the TSP is initialized
and prepares for receiving further requests from secure
monitor/dispatcher
- A fast SMC service entry point through which the TSP dispatcher
requests secure services on behalf of the non-secure client
Change-Id: I24377df53399307e2560a025eb2c82ce98ab3931
Co-authored-by: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
new file mode 100644
index 0000000..fd02fd8
--- /dev/null
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <bl_common.h>
+#include <arch.h>
+#include <tsp.h>
+
+
+ .globl tsp_entrypoint
+ .globl tsp_cpu_on_entry
+ .globl tsp_cpu_off_entry
+ .globl tsp_cpu_suspend_entry
+ .globl tsp_cpu_resume_entry
+ .globl tsp_fast_smc_entry
+
+ /* ---------------------------------------------
+ * Populate the params in x0-x7 from the pointer
+ * to the smc args structure in x0.
+ * ---------------------------------------------
+ */
+ .macro restore_args_call_smc
+ ldp x6, x7, [x0, #TSP_ARG6]
+ ldp x4, x5, [x0, #TSP_ARG4]
+ ldp x2, x3, [x0, #TSP_ARG2]
+ ldp x0, x1, [x0, #TSP_ARG0]
+ smc #0
+ .endm
+
+ .section .text, "ax"; .align 3
+
+
+tsp_entrypoint: ; .type tsp_entrypoint, %function
+ /*---------------------------------------------
+ * Store the extents of the tzram available to
+ * BL32 for future use.
+ * TODO: We are assuming that x9-x10 will not be
+ * corrupted by any function before platform
+ * setup.
+ * ---------------------------------------------
+ */
+ mov x9, x0
+ mov x10, x1
+
+ /* ---------------------------------------------
+ * The entrypoint is expected to be executed
+ * only by the primary cpu (at least for now).
+ * So, make sure no secondary has lost its way.
+ * ---------------------------------------------
+ */
+ mrs x0, mpidr_el1
+ bl platform_is_primary_cpu
+ cbz x0, tsp_entrypoint_panic
+
+ /* ---------------------------------------------
+ * Set the exception vector to something sane.
+ * ---------------------------------------------
+ */
+ adr x0, early_exceptions
+ msr vbar_el1, x0
+
+ /* ---------------------------------------------
+ * Enable the instruction cache.
+ * ---------------------------------------------
+ */
+ mrs x0, sctlr_el1
+ orr x0, x0, #SCTLR_I_BIT
+ msr sctlr_el1, x0
+ isb
+
+ /* ---------------------------------------------
+ * Zero out NOBITS sections. There are 2 of them:
+ * - the .bss section;
+ * - the coherent memory section.
+ * ---------------------------------------------
+ */
+ ldr x0, =__BSS_START__
+ ldr x1, =__BSS_SIZE__
+ bl zeromem16
+
+ ldr x0, =__COHERENT_RAM_START__
+ ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__
+ bl zeromem16
+
+ /* --------------------------------------------
+ * Give ourselves a small coherent stack to
+ * ease the pain of initializing the MMU
+ * --------------------------------------------
+ */
+ mrs x0, mpidr_el1
+ bl platform_set_coherent_stack
+
+ /* ---------------------------------------------
+ * Perform early platform setup & platform
+ * specific early arch. setup e.g. mmu setup
+ * ---------------------------------------------
+ */
+ mov x0, x9
+ mov x1, x10
+ bl bl32_early_platform_setup
+ bl bl32_plat_arch_setup
+
+ /* ---------------------------------------------
+ * Give ourselves a stack allocated in Normal
+ * -IS-WBWA memory
+ * ---------------------------------------------
+ */
+ mrs x0, mpidr_el1
+ bl platform_set_stack
+
+ /* ---------------------------------------------
+ * Jump to main function.
+ * ---------------------------------------------
+ */
+ bl tsp_main
+
+ /* ---------------------------------------------
+ * Tell TSPD that we are done initialising
+ * ---------------------------------------------
+ */
+ mov x1, x0
+ mov x0, #TSP_ENTRY_DONE
+ smc #0
+
+tsp_entrypoint_panic:
+ b tsp_entrypoint_panic
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD when this
+ * cpu is to be turned off through a CPU_OFF
+ * psci call to ask the TSP to perform any
+ * bookkeeping necessary. In the current
+ * implementation, the TSPD expects the TSP to
+ * re-initialise its state so nothing is done
+ * here except for acknowledging the request.
+ * ---------------------------------------------
+ */
+tsp_cpu_off_entry: ; .type tsp_cpu_off_entry, %function
+ bl tsp_cpu_off_main
+ restore_args_call_smc
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD when this
+ * cpu is turned on using a CPU_ON psci call to
+ * ask the TSP to initialise itself i.e. setup
+ * the mmu, stacks etc. Minimal architectural
+ * state will be initialised by the TSPD when
+ * this function is entered i.e. Caches and MMU
+ * will be turned off, the execution state
+ * will be aarch64 and exceptions masked.
+ * ---------------------------------------------
+ */
+tsp_cpu_on_entry: ; .type tsp_cpu_on_entry, %function
+ /* ---------------------------------------------
+ * Set the exception vector to something sane.
+ * ---------------------------------------------
+ */
+ adr x0, early_exceptions
+ msr vbar_el1, x0
+
+ /* ---------------------------------------------
+ * Enable the instruction cache.
+ * ---------------------------------------------
+ */
+ mrs x0, sctlr_el1
+ orr x0, x0, #SCTLR_I_BIT
+ msr sctlr_el1, x0
+ isb
+
+ /* --------------------------------------------
+ * Give ourselves a small coherent stack to
+ * ease the pain of initializing the MMU
+ * --------------------------------------------
+ */
+ mrs x0, mpidr_el1
+ bl platform_set_coherent_stack
+
+ /* ---------------------------------------------
+ * Initialise the MMU
+ * ---------------------------------------------
+ */
+ bl enable_mmu
+
+ /* ---------------------------------------------
+ * Give ourselves a stack allocated in Normal
+ * -IS-WBWA memory
+ * ---------------------------------------------
+ */
+ mrs x0, mpidr_el1
+ bl platform_set_stack
+
+ /* ---------------------------------------------
+ * Enter C runtime to perform any remaining
+ * book keeping
+ * ---------------------------------------------
+ */
+ bl tsp_cpu_on_main
+ restore_args_call_smc
+
+ /* Should never reach here */
+tsp_cpu_on_entry_panic:
+ b tsp_cpu_on_entry_panic
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD when this
+ * cpu is to be suspended through a CPU_SUSPEND
+ * psci call to ask the TSP to perform any
+ * bookkeeping necessary. In the current
+ * implementation, the TSPD saves and restores
+ * the EL1 state.
+ * ---------------------------------------------
+ */
+tsp_cpu_suspend_entry: ; .type tsp_cpu_suspend_entry, %function
+ bl tsp_cpu_suspend_main
+ restore_args_call_smc
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD when this
+ * cpu resumes execution after an earlier
+ * CPU_SUSPEND psci call to ask the TSP to
+ * restore its saved context. In the current
+ * implementation, the TSPD saves and restores
+ * EL1 state so nothing is done here apart from
+ * acknowledging the request.
+ * ---------------------------------------------
+ */
+tsp_cpu_resume_entry: ; .type tsp_cpu_resume_entry, %function
+ bl tsp_cpu_resume_main
+ restore_args_call_smc
+tsp_cpu_resume_panic:
+ b tsp_cpu_resume_panic
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD to ask
+ * the TSP to service a fast smc request.
+ * ---------------------------------------------
+ */
+tsp_fast_smc_entry: ; .type tsp_fast_smc_entry, %function
+ bl tsp_fast_smc_handler
+ restore_args_call_smc
+tsp_fast_smc_entry_panic:
+ b tsp_fast_smc_entry_panic
+
diff --git a/bl32/tsp/aarch64/tsp_request.S b/bl32/tsp/aarch64/tsp_request.S
new file mode 100644
index 0000000..763e8fc
--- /dev/null
+++ b/bl32/tsp/aarch64/tsp_request.S
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tsp.h>
+
+ .globl tsp_get_magic
+
+ .section .text, "ax"; .align 3
+
+
+/*
+ * This function raises an SMC to retrieve arguments from secure
+ * monitor/dispatcher, saves the returned arguments in the array received in x0,
+ * and then returns to the caller
+ */
+tsp_get_magic:
+ /* Save address to stack */
+ stp x0, xzr, [sp, #-16]!
+
+ /* Load arguments */
+ ldr w0, _tsp_fid_get_magic
+
+ /* Raise SMC */
+ smc #0
+
+ /* Restore address from stack */
+ ldp x4, xzr, [sp], #16
+
+ /* Store returned arguments to the array */
+ stp x0, x1, [x4, #0]
+ stp x2, x3, [x4, #16]
+
+ ret
+
+ .align 2
+_tsp_fid_get_magic:
+ .word TSP_GET_ARGS
diff --git a/bl32/tsp/tsp-fvp.mk b/bl32/tsp/tsp-fvp.mk
new file mode 100644
index 0000000..c5bbd7f
--- /dev/null
+++ b/bl32/tsp/tsp-fvp.mk
@@ -0,0 +1,40 @@
+#
+# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+
+vpath %.c ${PLAT_BL2_C_VPATH}
+vpath %.S ${PLAT_BL2_S_VPATH}
+
+# TSP source files specific to FVP platform
+BL32_OBJS += bl32_plat_setup.o \
+ bl32_setup_xlat.o \
+ plat_common.o \
+ ${BL_COMMON_OBJS} \
+ ${PLAT_BL_COMMON_OBJS}
diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S
new file mode 100644
index 0000000..30d9a93
--- /dev/null
+++ b/bl32/tsp/tsp.ld.S
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+
+MEMORY {
+ RAM (rwx): ORIGIN = TZDRAM_BASE, LENGTH = TZDRAM_SIZE
+}
+
+
+SECTIONS
+{
+ . = BL32_BASE;
+ ASSERT(. == ALIGN(4096),
+ "BL32_BASE address is not aligned on a page boundary.")
+
+ ro . : {
+ __RO_START__ = .;
+ *tsp_entrypoint.o(.text)
+ *(.text)
+ *(.rodata*)
+ *(.vectors)
+ __RO_END_UNALIGNED__ = .;
+ /*
+ * Memory page(s) mapped to this section will be marked as
+ * read-only, executable. No RW data from the next section must
+ * creep in. Ensure the rest of the current memory page is unused.
+ */
+ . = NEXT(4096);
+ __RO_END__ = .;
+ } >RAM
+
+ .data . : {
+ __DATA_START__ = .;
+ *(.data)
+ __DATA_END__ = .;
+ } >RAM
+
+ stacks (NOLOAD) : {
+ __STACKS_START__ = .;
+ *(tzfw_normal_stacks)
+ __STACKS_END__ = .;
+ } >RAM
+
+ /*
+ * The .bss section gets initialised to 0 at runtime.
+ * Its base address must be 16-byte aligned.
+ */
+ .bss : ALIGN(16) {
+ __BSS_START__ = .;
+ *(SORT_BY_ALIGNMENT(.bss))
+ *(COMMON)
+ __BSS_END__ = .;
+ } >RAM
+
+ /*
+ * The xlat_table section is for full, aligned page tables (4K).
+ * Removing them from .bss avoids forcing 4K alignment on
+ * the .bss section and eliminates the unnecessary zero init
+ */
+ xlat_table (NOLOAD) : {
+ *(xlat_table)
+ } >RAM
+
+ /*
+ * The base address of the coherent memory section must be page-aligned (4K)
+ * to guarantee that the coherent data are stored on their own pages and
+ * are not mixed with normal data. This is required to set up the correct
+ * memory attributes for the coherent data page tables.
+ */
+ coherent_ram (NOLOAD) : ALIGN(4096) {
+ __COHERENT_RAM_START__ = .;
+ *(tzfw_coherent_mem)
+ __COHERENT_RAM_END_UNALIGNED__ = .;
+ /*
+ * Memory page(s) mapped to this section will be marked
+ * as device memory. No other unexpected data must creep in.
+ * Ensure the rest of the current memory page is unused.
+ */
+ . = NEXT(4096);
+ __COHERENT_RAM_END__ = .;
+ } >RAM
+
+ __BL2_END__ = .;
+
+ __BSS_SIZE__ = SIZEOF(.bss);
+ __COHERENT_RAM_UNALIGNED_SIZE__ =
+ __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
+
+ ASSERT(. <= TZDRAM_BASE + (1 << 21), "BL32 image does not fit in the first 2MB of Trusted DRAM.")
+}
diff --git a/bl32/tsp/tsp.mk b/bl32/tsp/tsp.mk
new file mode 100644
index 0000000..923af5c
--- /dev/null
+++ b/bl32/tsp/tsp.mk
@@ -0,0 +1,57 @@
+#
+# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+vpath %.c common \
+ lib \
+ plat/${PLAT} \
+ plat/${PLAT}/${ARCH} \
+ arch/${ARCH}
+
+vpath %.S lib/arch/${ARCH} \
+ include \
+ lib/sync/locks/exclusive
+
+BL32_OBJS += tsp_entrypoint.o \
+ tsp_main.o \
+ tsp_request.o \
+ spinlock.o \
+ early_exceptions.o
+
+BL32_ENTRY_POINT := tsp_entrypoint
+BL32_MAPFILE := tsp.map
+BL32_LINKERFILE := tsp.ld
+
+vpath %.ld.S ${BL32_ROOT}
+vpath %.c ${BL32_ROOT}
+vpath %.c ${BL32_ROOT}/${ARCH}
+vpath %.S ${BL32_ROOT}/${ARCH}
+
+# Include an optional platform-specific TSP Makefile
+-include bl32/tsp/tsp-${PLAT}.mk
diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c
new file mode 100644
index 0000000..258bee4
--- /dev/null
+++ b/bl32/tsp/tsp_main.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <bl32.h>
+#include <tsp.h>
+#include <arch_helpers.h>
+#include <stdio.h>
+#include <platform.h>
+#include <debug.h>
+#include <spinlock.h>
+
+/*******************************************************************************
+ * Lock to control access to the console
+ ******************************************************************************/
+spinlock_t console_lock;
+
+/*******************************************************************************
+ * Per cpu data structure to populate parameters for an SMC in C code and use
+ * a pointer to this structure in assembler code to populate x0-x7
+ ******************************************************************************/
+static tsp_args tsp_smc_args[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Per cpu data structure to keep track of TSP activity
+ ******************************************************************************/
+static work_statistics tsp_stats[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Single reference to the various entry points exported by the test secure
+ * payload. A single copy should suffice for all cpus as they are not expected
+ * to change.
+ ******************************************************************************/
+static const entry_info tsp_entry_info = {
+ tsp_fast_smc_entry,
+ tsp_cpu_on_entry,
+ tsp_cpu_off_entry,
+ tsp_cpu_resume_entry,
+ tsp_cpu_suspend_entry,
+};
+
+static tsp_args *set_smc_args(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint64_t mpidr = read_mpidr();
+ uint32_t linear_id;
+ tsp_args *pcpu_smc_args;
+
+ /*
+ * Return to Secure Monitor by raising an SMC. The results of the
+ * service are passed as arguments to the SMC
+ */
+ linear_id = platform_get_core_pos(mpidr);
+ pcpu_smc_args = &tsp_smc_args[linear_id];
+ write_sp_arg(pcpu_smc_args, TSP_ARG0, arg0);
+ write_sp_arg(pcpu_smc_args, TSP_ARG1, arg1);
+ write_sp_arg(pcpu_smc_args, TSP_ARG2, arg2);
+ write_sp_arg(pcpu_smc_args, TSP_ARG3, arg3);
+ write_sp_arg(pcpu_smc_args, TSP_ARG4, arg4);
+ write_sp_arg(pcpu_smc_args, TSP_ARG5, arg5);
+ write_sp_arg(pcpu_smc_args, TSP_ARG6, arg6);
+ write_sp_arg(pcpu_smc_args, TSP_ARG7, arg7);
+
+ return pcpu_smc_args;
+}
+
+/*******************************************************************************
+ * TSP main entry point where it gets the opportunity to initialize its secure
+ * state/applications. Once the state is initialized, it must return to the
+ * SPD with a pointer to the 'tsp_entry_info' structure.
+ ******************************************************************************/
+uint64_t tsp_main(void)
+{
+ uint64_t mpidr = read_mpidr();
+ uint32_t linear_id = platform_get_core_pos(mpidr);
+
+#if DEBUG
+ meminfo *mem_layout = bl32_plat_sec_mem_layout();
+#endif
+
+ /* Initialize the platform */
+ bl32_platform_setup();
+
+ /* Initialize secure/applications state here */
+
+ /* Update this cpu's statistics */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_on_count++;
+
+ spin_lock(&console_lock);
+#if defined (__GNUC__)
+ printf("TSP Built : %s, %s\n\r", __TIME__, __DATE__);
+#endif
+ INFO("Total memory base : 0x%x\n", mem_layout->total_base);
+ INFO("Total memory size : 0x%x bytes\n", mem_layout->total_size);
+ INFO("Free memory base : 0x%x\n", mem_layout->free_base);
+ INFO("Free memory size : 0x%x bytes\n", mem_layout->free_size);
+ INFO("cpu 0x%x: %d smcs, %d erets %d cpu on requests\n", mpidr,
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_on_count);
+ spin_unlock(&console_lock);
+
+ /*
+ * TODO: There is a massive assumption that the SPD and SP can see each
+ * other's memory without issues so it is safe to pass pointers to
+ * internal memory. Replace this with a shared communication buffer.
+ */
+ return (uint64_t) &tsp_entry_info;
+}
+
+/*******************************************************************************
+ * This function performs any remaining book keeping in the test secure payload
+ * after this cpu's architectural state has been setup in response to an earlier
+ * psci cpu_on request.
+ ******************************************************************************/
+tsp_args *tsp_cpu_on_main(void)
+{
+ uint64_t mpidr = read_mpidr();
+ uint32_t linear_id = platform_get_core_pos(mpidr);
+
+ /* Update this cpu's statistics */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_on_count++;
+
+ spin_lock(&console_lock);
+ printf("SP: cpu 0x%x turned on\n\r", mpidr);
+ INFO("cpu 0x%x: %d smcs, %d erets %d cpu on requests\n", mpidr,
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_on_count);
+ spin_unlock(&console_lock);
+
+ /* Indicate to the SPD that we have completed turning ourselves on */
+ return set_smc_args(TSP_ON_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * This function performs any remaining book keeping in the test secure payload
+ * before this cpu is turned off in response to a psci cpu_off request.
+ ******************************************************************************/
+tsp_args *tsp_cpu_off_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint64_t mpidr = read_mpidr();
+ uint32_t linear_id = platform_get_core_pos(mpidr);
+
+ /* Update this cpu's statistics */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_off_count++;
+
+ spin_lock(&console_lock);
+ printf("SP: cpu 0x%x off request\n\r", mpidr);
+ INFO("cpu 0x%x: %d smcs, %d erets %d cpu off requests\n", mpidr,
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_off_count);
+ spin_unlock(&console_lock);
+
+
+ /*
+ * Indicate to the SPD that we have completed
+ * this initialisation request.
+ */
+ return set_smc_args(TSP_OFF_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * This function performs any book keeping in the test secure payload before
+ * this cpu's architectural state is saved in response to an earlier psci
+ * cpu_suspend request.
+ ******************************************************************************/
+tsp_args *tsp_cpu_suspend_main(uint64_t power_state,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint64_t mpidr = read_mpidr();
+ uint32_t linear_id = platform_get_core_pos(mpidr);
+
+ /* Update this cpu's statistics */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_suspend_count++;
+
+ spin_lock(&console_lock);
+ printf("SP: cpu 0x%x suspend request. power state: 0x%x\n\r",
+ mpidr, power_state);
+ INFO("cpu 0x%x: %d smcs, %d erets %d cpu suspend requests\n", mpidr,
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_suspend_count);
+ spin_unlock(&console_lock);
+
+ /*
+ * Indicate to the SPD that we have completed
+ * this initialisation request.
+ */
+ return set_smc_args(TSP_SUSPEND_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * This function performs any book keeping in the test secure payload after this
+ * cpu's architectural state has been restored after wakeup from an earlier psci
+ * cpu_suspend request.
+ ******************************************************************************/
+tsp_args *tsp_cpu_resume_main(uint64_t suspend_level,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint64_t mpidr = read_mpidr();
+ uint32_t linear_id = platform_get_core_pos(mpidr);
+
+ /* Update this cpu's statistics */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_resume_count++;
+
+ spin_lock(&console_lock);
+ printf("SP: cpu 0x%x resumed. suspend level %d \n\r",
+ mpidr, suspend_level);
+ INFO("cpu 0x%x: %d smcs, %d erets %d cpu suspend requests\n", mpidr,
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_suspend_count);
+ spin_unlock(&console_lock);
+
+ /*
+ * Indicate to the SPD that we have completed
+ * this initialisation request.
+ */
+ return set_smc_args(TSP_RESUME_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * TSP fast smc handler. The secure monitor jumps to this function by
+ * doing the ERET after populating X0-X7 registers. The arguments are received
+ * in the function arguments in order. Once the service is rendered, this
+ * function returns to Secure Monitor by raising SMC
+ ******************************************************************************/
+tsp_args *tsp_fast_smc_handler(uint64_t func,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint64_t results[4];
+ uint64_t service_args[4];
+
+ INFO("Received fast smc 0x%x on cpu 0x%x\n", func, read_mpidr());
+
+ /* Render secure services and obtain results here */
+
+ results[0] = arg1;
+ results[1] = arg2;
+ results[2] = arg3;
+ results[3] = arg4;
+
+ /*
+ * Request a service back from dispatcher/secure monitor. This call
+ * will return and thereafter resume execution
+ */
+ tsp_get_magic(service_args);
+
+ /* Determine the function to perform based on the function ID */
+ switch (func) {
+ case TSP_FID_ADD:
+ results[0] += service_args[0];
+ results[1] += service_args[1];
+ results[2] += service_args[2];
+ results[3] += service_args[3];
+ break;
+ case TSP_FID_SUB:
+ results[0] -= service_args[0];
+ results[1] -= service_args[1];
+ results[2] -= service_args[2];
+ results[3] -= service_args[3];
+ break;
+ case TSP_FID_MUL:
+ results[0] *= service_args[0];
+ results[1] *= service_args[1];
+ results[2] *= service_args[2];
+ results[3] *= service_args[3];
+ break;
+ case TSP_FID_DIV:
+ results[0] /= service_args[0] ? service_args[0] : 1;
+ results[1] /= service_args[1] ? service_args[1] : 1;
+ results[2] /= service_args[2] ? service_args[2] : 1;
+ results[3] /= service_args[3] ? service_args[3] : 1;
+ break;
+ default:
+ break;
+ }
+
+ return set_smc_args(TSP_WORK_DONE,
+ results[0],
+ results[1],
+ results[2],
+ results[3],
+ 0, 0, 0);
+}
+
diff --git a/include/bl32.h b/include/bl32.h
new file mode 100644
index 0000000..88e18bd
--- /dev/null
+++ b/include/bl32.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __BL32_H__
+#define __BL32_H__
+
+#ifndef __ASSEMBLY__
+#include <stdint.h>
+
+#include <bl_common.h>
+
+/* Platform-specific setup for BL32, e.g. bringing up its console */
+extern void bl32_platform_setup(void);
+/* Accessor for the secure memory layout BL32 records at early platform setup */
+extern meminfo *bl32_plat_sec_mem_layout(void);
+/* C entry point of the BL32 image — presumably invoked after the
+   architectural setup completes; confirm against the BL32 entrypoint code */
+extern uint64_t bl32_main(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __BL32_H__ */
diff --git a/include/tsp.h b/include/tsp.h
new file mode 100644
index 0000000..69dda05
--- /dev/null
+++ b/include/tsp.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TSP_H__
+#define __TSP_H__
+
+#include <bl_common.h>
+#include <platform.h>
+
+/*
+ * SMC function IDs that TSP uses to signal various forms of completions
+ * to the secure payload dispatcher.
+ */
+#define TSP_ENTRY_DONE 0xf2000000
+#define TSP_ON_DONE 0xf2000001
+#define TSP_OFF_DONE 0xf2000002
+#define TSP_SUSPEND_DONE 0xf2000003
+#define TSP_RESUME_DONE 0xf2000004
+#define TSP_WORK_DONE 0xf2000005
+
+/* SMC function ID that TSP uses to request service from secure monitor */
+#define TSP_GET_ARGS 0xf2001000
+
+/* Function IDs for various TSP services */
+#define TSP_FID_ADD 0xf2002000
+#define TSP_FID_SUB 0xf2002001
+#define TSP_FID_MUL 0xf2002002
+#define TSP_FID_DIV 0xf2002003
+
+/* Definitions to help the assembler access the SMC/ERET args structure */
+#define TSP_ARGS_SIZE 0x40
+#define TSP_ARG0 0x0
+#define TSP_ARG1 0x8
+#define TSP_ARG2 0x10
+#define TSP_ARG3 0x18
+#define TSP_ARG4 0x20
+#define TSP_ARG5 0x28
+#define TSP_ARG6 0x30
+#define TSP_ARG7 0x38
+#define TSP_ARGS_END 0x40
+
+#ifndef __ASSEMBLY__
+#include <stdint.h>
+
+/*
+ * Common prototype for all TSP entry points: the eight uint64_t arguments
+ * mirror registers X0-X7 as populated by the secure monitor before the ERET.
+ */
+typedef void (*tsp_generic_fptr)(uint64_t arg0,
+				 uint64_t arg1,
+				 uint64_t arg2,
+				 uint64_t arg3,
+				 uint64_t arg4,
+				 uint64_t arg5,
+				 uint64_t arg6,
+				 uint64_t arg7);
+
+/* Vector of entry points the TSP exports, one per dispatcher event */
+typedef struct {
+	tsp_generic_fptr fast_smc_entry;	/* Fast SMC service request */
+	tsp_generic_fptr cpu_on_entry;		/* PSCI cpu-on power event */
+	tsp_generic_fptr cpu_off_entry;		/* PSCI cpu-off power event */
+	tsp_generic_fptr cpu_resume_entry;	/* PSCI resume power event */
+	tsp_generic_fptr cpu_suspend_entry;	/* PSCI suspend power event */
+} entry_info;
+
+/*
+ * Per-cpu work counters, aligned to the cache writeback granule so each
+ * cpu's statistics occupy distinct cache lines.
+ */
+typedef struct {
+	uint32_t smc_count;		/* Number of returns on this cpu */
+	uint32_t eret_count;		/* Number of entries on this cpu */
+	uint32_t cpu_on_count;		/* Number of cpu on requests */
+	uint32_t cpu_off_count;		/* Number of cpu off requests */
+	uint32_t cpu_suspend_count;	/* Number of cpu suspend requests */
+	uint32_t cpu_resume_count;	/* Number of cpu resume requests */
+} __aligned(CACHE_WRITEBACK_GRANULE) work_statistics;
+
+/*
+ * X0-X7 values exchanged with the secure monitor on SMC/ERET; _regs is
+ * indexed by byte offset (TSP_ARG0..TSP_ARG7) >> 3.
+ */
+typedef struct {
+	uint64_t _regs[TSP_ARGS_END >> 3];
+} __aligned(CACHE_WRITEBACK_GRANULE) tsp_args;
+
+/*
+ * Macros to access members of the above structure using their byte offsets.
+ * The offset and value arguments are parenthesised so that arbitrary
+ * expressions may be passed without precedence surprises.
+ */
+#define read_sp_arg(args, offset)	((args)->_regs[(offset) >> 3])
+#define write_sp_arg(args, offset, val)	(((args)->_regs[(offset) >> 3]) \
+					 = (val))
+
+/*
+ * Ensure that the assembler's view of the size of the tsp_args is the
+ * same as the compiler's
+ */
+CASSERT(TSP_ARGS_SIZE == sizeof(tsp_args), assert_sp_args_size_mismatch);
+
+extern void tsp_get_magic(uint64_t args[4]);
+
+extern void tsp_fast_smc_entry(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+extern void tsp_cpu_resume_entry(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+extern tsp_args *tsp_cpu_resume_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+extern void tsp_cpu_suspend_entry(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+extern tsp_args *tsp_cpu_suspend_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+extern void tsp_cpu_on_entry(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+extern tsp_args *tsp_cpu_on_main(void);
+extern void tsp_cpu_off_entry(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+extern tsp_args *tsp_cpu_off_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+#endif /* __ASSEMBLY__ */
+
+#endif /* __TSP_H__ */
diff --git a/plat/fvp/aarch64/bl32_setup_xlat.c b/plat/fvp/aarch64/bl32_setup_xlat.c
new file mode 100644
index 0000000..c4f422c
--- /dev/null
+++ b/plat/fvp/aarch64/bl32_setup_xlat.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <platform.h>
+#include <bl_common.h>
+
+#define BL32_NUM_L3_PAGETABLES 3
+#define BL32_TZRAM_PAGETABLE 0
+#define BL32_TZDRAM_PAGETABLE 1
+#define BL32_NSRAM_PAGETABLE 2
+
+/*******************************************************************************
+ * Level 1 translation tables need 4 entries for the 4GB address space
+ * accessible by the secure firmware. Input address space will be restricted
+ * using the T0SZ settings in the TCR.
+ ******************************************************************************/
+static unsigned long l1_xlation_table[ADDR_SPACE_SIZE >> 30]
+__attribute__ ((aligned((ADDR_SPACE_SIZE >> 30) << 3)));
+
+/*******************************************************************************
+ * Level 2 translation tables describe the first & second gb of the address
+ * space needed to address secure peripherals e.g. trusted ROM and RAM.
+ ******************************************************************************/
+static unsigned long l2_xlation_table[NUM_L2_PAGETABLES][NUM_2MB_IN_GB]
+__attribute__ ((aligned(NUM_2MB_IN_GB << 3),
+ section("xlat_table")));
+
+/*******************************************************************************
+ * Level 3 translation tables (3 sets) describe the trusted SRAM, trusted DRAM
+ * and non-trusted SRAM regions at a granularity of 4K.
+ ******************************************************************************/
+static unsigned long l3_xlation_table[BL32_NUM_L3_PAGETABLES][NUM_4K_IN_2MB]
+__attribute__ ((aligned(NUM_4K_IN_2MB << 3),
+ section("xlat_table")));
+
+/*******************************************************************************
+ * Create the BL32 page tables as per the platform memory map: a 4-entry
+ * level 1 table, level 2 tables for the first two GB, and level 3 tables for
+ * TZRAM, TZDRAM (with the RO and coherent sub-regions given by the
+ * arguments) and NSRAM. Returns the base of the level 1 translation table.
+ * TODO: Move the page table setup helpers into the arch or lib directory
+ ******************************************************************************/
+unsigned long fill_xlation_tables(meminfo *tzdram_layout,
+				  unsigned long ro_start,
+				  unsigned long ro_limit,
+				  unsigned long coh_start,
+				  unsigned long coh_limit)
+{
+	unsigned long l2_desc, l3_desc;
+	unsigned long *xt_addr = 0, *pt_addr, off = 0;
+	unsigned long trom_start_index, trom_end_index;
+	unsigned long tzram_start_index, tzram_end_index;
+	unsigned long flash0_start_index, flash0_end_index;
+	unsigned long flash1_start_index, flash1_end_index;
+	unsigned long vram_start_index, vram_end_index;
+	unsigned long nsram_start_index, nsram_end_index;
+	unsigned long tzdram_start_index, tzdram_end_index;
+	unsigned long dram_start_index, dram_end_index;
+	unsigned long dev0_start_index, dev0_end_index;
+	unsigned long dev1_start_index, dev1_end_index;
+	unsigned int idx;
+
+	/*****************************************************************
+	 * LEVEL1 PAGETABLE SETUP
+	 *
+	 * Find the start and end indices of the memory peripherals in the
+	 * first level pagetables. These are the main areas we care about.
+	 * Also bump the end index by one if it's equal to the start to
+	 * allow for regions which lie completely in a GB.
+	 *****************************************************************/
+	trom_start_index = ONE_GB_INDEX(TZROM_BASE);
+	dev0_start_index = ONE_GB_INDEX(TZRNG_BASE);
+	dram_start_index = ONE_GB_INDEX(DRAM_BASE);
+	dram_end_index = ONE_GB_INDEX(DRAM_BASE + DRAM_SIZE);
+
+	if (dram_end_index == dram_start_index)
+		dram_end_index++;
+
+	/*
+	 * Fill up the level1 translation table first
+	 */
+	for (idx = 0; idx < (ADDR_SPACE_SIZE >> 30); idx++) {
+
+		/*
+		 * Fill up the entry for the TZROM. This will cover
+		 * everything in the first GB.
+		 */
+		if (idx == trom_start_index) {
+			xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][0];
+			l1_xlation_table[idx] = create_table_desc(xt_addr);
+			continue;
+		}
+
+		/*
+		 * Mark the second gb as device
+		 */
+		if (idx == dev0_start_index) {
+			xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][0];
+			l1_xlation_table[idx] = create_table_desc(xt_addr);
+			continue;
+		}
+
+		/*
+		 * Fill up the block entry for the DRAM with Normal
+		 * inner-WBWA outer-WBWA non-transient attributes.
+		 * This will cover 2-4GB. Note that the accesses are
+		 * marked as non-secure.
+		 */
+		if ((idx >= dram_start_index) && (idx < dram_end_index)) {
+			l1_xlation_table[idx] = create_rwmem_block(idx, LEVEL1,
+								   NS);
+			continue;
+		}
+
+		assert(0);	/* every GB index must match a case above */
+	}
+
+
+	/*****************************************************************
+	 * LEVEL2 PAGETABLE SETUP
+	 *
+	 * Find the start and end indices of the memory & peripherals in the
+	 * second level pagetables.
+	 ******************************************************************/
+
+	/* Initializations for the 1st GB */
+	trom_start_index = TWO_MB_INDEX(TZROM_BASE);
+	trom_end_index = TWO_MB_INDEX(TZROM_BASE + TZROM_SIZE);
+	if (trom_end_index == trom_start_index)
+		trom_end_index++;
+
+	tzdram_start_index = TWO_MB_INDEX(TZDRAM_BASE);
+	tzdram_end_index = TWO_MB_INDEX(TZDRAM_BASE + TZDRAM_SIZE);
+	if (tzdram_end_index == tzdram_start_index)
+		tzdram_end_index++;
+
+	/* NOTE(review): TZROM_SIZE rather than FLASH0_SIZE below — flash0
+	   aliases the trusted boot ROM; confirm this is intended */
+	flash0_start_index = TWO_MB_INDEX(FLASH0_BASE);
+	flash0_end_index = TWO_MB_INDEX(FLASH0_BASE + TZROM_SIZE);
+	if (flash0_end_index == flash0_start_index)
+		flash0_end_index++;
+
+	flash1_start_index = TWO_MB_INDEX(FLASH1_BASE);
+	flash1_end_index = TWO_MB_INDEX(FLASH1_BASE + FLASH1_SIZE);
+	if (flash1_end_index == flash1_start_index)
+		flash1_end_index++;
+
+	vram_start_index = TWO_MB_INDEX(VRAM_BASE);
+	vram_end_index = TWO_MB_INDEX(VRAM_BASE + VRAM_SIZE);
+	if (vram_end_index == vram_start_index)
+		vram_end_index++;
+
+	dev0_start_index = TWO_MB_INDEX(DEVICE0_BASE);
+	dev0_end_index = TWO_MB_INDEX(DEVICE0_BASE + DEVICE0_SIZE);
+	if (dev0_end_index == dev0_start_index)
+		dev0_end_index++;
+
+	dev1_start_index = TWO_MB_INDEX(DEVICE1_BASE);
+	dev1_end_index = TWO_MB_INDEX(DEVICE1_BASE + DEVICE1_SIZE);
+	if (dev1_end_index == dev1_start_index)
+		dev1_end_index++;
+
+	/* Since the size is < 2M this is a single index */
+	tzram_start_index = TWO_MB_INDEX(TZRAM_BASE);
+	nsram_start_index = TWO_MB_INDEX(NSRAM_BASE);
+
+	/*
+	 * Fill up the level2 translation table for the first GB next
+	 */
+	for (idx = 0; idx < NUM_2MB_IN_GB; idx++) {
+
+		l2_desc = INVALID_DESC;
+		xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][idx];
+
+		/* Block entries for 64M of trusted Boot ROM */
+		if ((idx >= trom_start_index) && (idx < trom_end_index))
+			l2_desc = create_romem_block(idx, LEVEL2, 0);
+
+		/* Single L3 page table entry for 256K of TZRAM */
+		if (idx == tzram_start_index) {
+			pt_addr = &l3_xlation_table[BL32_TZRAM_PAGETABLE][0];
+			l2_desc = create_table_desc(pt_addr);
+		}
+
+		/*
+		 * Single L3 page table entry for first 2MB of TZDRAM.
+		 * TODO: We are assuming the BL32 image will not
+		 * exceed this
+		 */
+		if (idx == tzdram_start_index) {
+			pt_addr = &l3_xlation_table[BL32_TZDRAM_PAGETABLE][0];
+			l2_desc = create_table_desc(pt_addr);
+		}
+
+		/*
+		 * Block entries for the rest of the 32M of trusted DRAM.
+		 * NOTE(review): the upper bound uses <= while every other
+		 * range check here is exclusive (<); this maps one extra
+		 * 2MB block past tzdram_end_index — confirm it is intended.
+		 */
+		if ((idx >= tzdram_start_index + 1) && (idx <= tzdram_end_index))
+			l2_desc = create_rwmem_block(idx, LEVEL2, 0);
+
+		/* Block entries for 64M of aliased trusted Boot ROM */
+		if ((idx >= flash0_start_index) && (idx < flash0_end_index))
+			l2_desc = create_romem_block(idx, LEVEL2, 0);
+
+		/* Block entries for 64M of flash1 */
+		if ((idx >= flash1_start_index) && (idx < flash1_end_index))
+			l2_desc = create_romem_block(idx, LEVEL2, 0);
+
+		/* Block entries for 32M of VRAM */
+		if ((idx >= vram_start_index) && (idx < vram_end_index))
+			l2_desc = create_rwmem_block(idx, LEVEL2, 0);
+
+		/* Block entries for the first device region in the first gb */
+		if ((idx >= dev0_start_index) && (idx < dev0_end_index))
+			l2_desc = create_device_block(idx, LEVEL2, 0);
+
+		/* Block entries for the second device region in the first gb */
+		if ((idx >= dev1_start_index) && (idx < dev1_end_index))
+			l2_desc = create_device_block(idx, LEVEL2, 0);
+
+		/* Single L3 page table entry for 64K of NSRAM */
+		if (idx == nsram_start_index) {
+			pt_addr = &l3_xlation_table[BL32_NSRAM_PAGETABLE][0];
+			l2_desc = create_table_desc(pt_addr);
+		}
+
+		*xt_addr = l2_desc;
+	}
+
+
+	/*
+	 * Initializations for the 2nd GB. Mark everything as device
+	 * for the time being as the memory map is not final. Each
+	 * index will need to be offset'ed to allow absolute values
+	 */
+	off = NUM_2MB_IN_GB;
+	for (idx = off; idx < (NUM_2MB_IN_GB + off); idx++) {
+		l2_desc = create_device_block(idx, LEVEL2, 0);
+		xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][idx - off];
+		*xt_addr = l2_desc;
+	}
+
+
+	/*****************************************************************
+	 * LEVEL3 PAGETABLE SETUP
+	 *****************************************************************/
+
+	/* Fill up the level3 pagetable for the trusted SRAM. */
+	tzram_start_index = FOUR_KB_INDEX(TZRAM_BASE);
+	tzram_end_index = FOUR_KB_INDEX(TZRAM_BASE + TZRAM_SIZE);
+	if (tzram_end_index == tzram_start_index)
+		tzram_end_index++;
+
+	/* Each index will need to be offset'ed to allow absolute values */
+	off = FOUR_KB_INDEX(TZRAM_BASE);
+	for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {
+
+		l3_desc = INVALID_DESC;
+		xt_addr = &l3_xlation_table[BL32_TZRAM_PAGETABLE][idx - off];
+
+		/*
+		 * TODO: All TZRAM memory is being mapped as RW while
+		 * earlier boot stages map parts of it as Device. This
+		 * could lead to possible coherency issues. Ditto for
+		 * the way earlier boot loader stages map TZDRAM
+		 */
+		if (idx >= tzram_start_index && idx < tzram_end_index)
+			l3_desc = create_rwmem_block(idx, LEVEL3, 0);
+
+		*xt_addr = l3_desc;
+	}
+
+	/* Fill up the level3 pagetable for the trusted DRAM. */
+	tzdram_start_index = FOUR_KB_INDEX(tzdram_layout->total_base);
+	tzdram_end_index = FOUR_KB_INDEX(tzdram_layout->total_base +
+					 tzdram_layout->total_size);
+	if (tzdram_end_index == tzdram_start_index)
+		tzdram_end_index++;
+
+	/* Reusing trom* to mark RO memory. */
+	trom_start_index = FOUR_KB_INDEX(ro_start);
+	trom_end_index = FOUR_KB_INDEX(ro_limit);
+	if (trom_end_index == trom_start_index)
+		trom_end_index++;
+
+	/* Reusing dev* to mark coherent device memory. */
+	dev0_start_index = FOUR_KB_INDEX(coh_start);
+	dev0_end_index = FOUR_KB_INDEX(coh_limit);
+	if (dev0_end_index == dev0_start_index)
+		dev0_end_index++;
+
+	/* Each index will need to be offset'ed to allow absolute values */
+	off = FOUR_KB_INDEX(TZDRAM_BASE);
+	for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {
+
+		l3_desc = INVALID_DESC;
+		xt_addr = &l3_xlation_table[BL32_TZDRAM_PAGETABLE][idx - off];
+
+		/* Default: RW secure memory for the whole TZDRAM range... */
+		if (idx >= tzdram_start_index && idx < tzdram_end_index)
+			l3_desc = create_rwmem_block(idx, LEVEL3, 0);
+
+		/* ...then tighten the code/RO-data range to read-only... */
+		if (idx >= trom_start_index && idx < trom_end_index)
+			l3_desc = create_romem_block(idx, LEVEL3, 0);
+
+		/* ...and finally mark the coherent region as device memory */
+		if (idx >= dev0_start_index && idx < dev0_end_index)
+			l3_desc = create_device_block(idx, LEVEL3, 0);
+
+		*xt_addr = l3_desc;
+	}
+
+	/* Fill up the level3 pagetable for the non-trusted SRAM. */
+	nsram_start_index = FOUR_KB_INDEX(NSRAM_BASE);
+	nsram_end_index = FOUR_KB_INDEX(NSRAM_BASE + NSRAM_SIZE);
+	if (nsram_end_index == nsram_start_index)
+		nsram_end_index++;
+
+	/* Each index will need to be offset'ed to allow absolute values */
+	off = FOUR_KB_INDEX(NSRAM_BASE);
+	for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {
+
+		l3_desc = INVALID_DESC;
+		xt_addr = &l3_xlation_table[BL32_NSRAM_PAGETABLE][idx - off];
+
+		/* NSRAM is normal RW memory, marked non-secure */
+		if (idx >= nsram_start_index && idx < nsram_end_index)
+			l3_desc = create_rwmem_block(idx, LEVEL3, NS);
+
+		*xt_addr = l3_desc;
+	}
+
+	return (unsigned long) l1_xlation_table;
+}
diff --git a/plat/fvp/bl32_plat_setup.c b/plat/fvp/bl32_plat_setup.c
new file mode 100644
index 0000000..ba418fd
--- /dev/null
+++ b/plat/fvp/bl32_plat_setup.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <platform.h>
+#include <bl32.h>
+#include <bl_common.h>
+#include <console.h>
+
+/*******************************************************************************
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted SRAM
+ ******************************************************************************/
+extern unsigned long __RO_START__;
+extern unsigned long __RO_END__;
+
+extern unsigned long __COHERENT_RAM_START__;
+extern unsigned long __COHERENT_RAM_END__;
+
+/*
+ * The next 2 constants identify the extents of the code & RO data region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned. It is the responsibility of the linker script to ensure that
+ * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
+ */
+#define BL32_RO_BASE (unsigned long)(&__RO_START__)
+#define BL32_RO_LIMIT (unsigned long)(&__RO_END__)
+
+/*
+ * The next 2 constants identify the extents of the coherent memory region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned. It is the responsibility of the linker script to ensure that
+ * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
+ * page-aligned addresses.
+ */
+#define BL32_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
+#define BL32_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
+
+/*
+ * Data structure which holds the extents of the trusted SRAM for BL32. It is
+ * placed in the 'tzfw_coherent_mem' section — presumably so it is safely
+ * accessible before the MMU is enabled; confirm against the linker script.
+ */
+static meminfo bl32_tzdram_layout
+__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
+		section("tzfw_coherent_mem")));
+
+/* Return a pointer to BL32's view of the secure memory layout */
+meminfo *bl32_plat_sec_mem_layout(void)
+{
+	return &bl32_tzdram_layout;
+}
+
+/*******************************************************************************
+ * BL1 has passed the extents of the trusted SRAM that's at BL32's disposal.
+ * Initialize the BL32 data structure with the memory extents. The opaque
+ * 'data' parameter is currently unused.
+ ******************************************************************************/
+void bl32_early_platform_setup(meminfo *mem_layout,
+			       void *data)
+{
+	/* Setup the BL32 memory layout, dropping any chained layout */
+	bl32_tzdram_layout.total_base = mem_layout->total_base;
+	bl32_tzdram_layout.total_size = mem_layout->total_size;
+	bl32_tzdram_layout.free_base = mem_layout->free_base;
+	bl32_tzdram_layout.free_size = mem_layout->free_size;
+	bl32_tzdram_layout.attr = mem_layout->attr;
+	bl32_tzdram_layout.next = 0;
+}
+
+/*******************************************************************************
+ * Perform platform specific setup
+ ******************************************************************************/
+void bl32_platform_setup(void)
+{
+	/*
+	 * Initialize a different console than already in use to display
+	 * messages from TSP
+	 */
+	console_init(PL011_UART1_BASE);
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup. At the
+ * moment this only initializes the MMU
+ ******************************************************************************/
+void bl32_plat_arch_setup(void)
+{
+	configure_mmu(&bl32_tzdram_layout,
+		      BL32_RO_BASE,
+		      BL32_RO_LIMIT,
+		      BL32_COHERENT_RAM_BASE,
+		      BL32_COHERENT_RAM_LIMIT);
+}