plat: marvell: Add common ARMADA platform components

Add common Marvell ARMADA platform components.
This patch also includes common components for Marvell
ARMADA 8K platforms.

Change-Id: I42192fdc6525a42e46b3ac2ad63c83db9bcbfeaf
Signed-off-by: Hanna Hawa <hannah@marvell.com>
Signed-off-by: Konstantin Porotchkin <kostap@marvell.com>
diff --git a/plat/marvell/common/aarch64/marvell_common.c b/plat/marvell/common/aarch64/marvell_common.c
new file mode 100644
index 0000000..abc501a
--- /dev/null
+++ b/plat/marvell/common/aarch64/marvell_common.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <plat_marvell.h>
+#include <platform_def.h>
+#include <xlat_tables.h>
+
+
+/* Weak definitions may be overridden in specific Marvell standard platform */
+#pragma weak plat_get_ns_image_entrypoint
+#pragma weak plat_marvell_get_mmap
+
+/*
+ * Set up the page tables for the generic and platform-specific memory regions.
+ * The extents of the generic memory regions are specified by the function
+ * arguments and consist of:
+ * - Trusted SRAM seen by the BL image;
+ * - Code section;
+ * - Read-only data section;
+ * - Coherent memory region, if applicable.
+ *
+ * total_base / total_size     - whole Trusted SRAM window; mapped first as
+ *                               secure RW memory, then partially re-mapped
+ *                               with tighter attributes below.
+ * code_start / code_limit     - executable image section, re-mapped MT_CODE.
+ * rodata_start / rodata_limit - read-only data, re-mapped MT_RO_DATA.
+ * coh_start / coh_limit       - (only when USE_COHERENT_MEM) coherent region,
+ *                               mapped as Device memory so it is never cached.
+ *
+ * Finishes by adding the platform mmap list and building the translation
+ * tables; the caller is expected to enable the MMU afterwards.
+ */
+void marvell_setup_page_tables(uintptr_t total_base,
+			       size_t total_size,
+			       uintptr_t code_start,
+			       uintptr_t code_limit,
+			       uintptr_t rodata_start,
+			       uintptr_t rodata_limit
+#if USE_COHERENT_MEM
+			       ,
+			       uintptr_t coh_start,
+			       uintptr_t coh_limit
+#endif
+			   )
+{
+	/*
+	 * Map the Trusted SRAM with appropriate memory attributes.
+	 * Subsequent mappings will adjust the attributes for specific regions.
+	 */
+	VERBOSE("Trusted SRAM seen by this BL image: %p - %p\n",
+		(void *) total_base, (void *) (total_base + total_size));
+	mmap_add_region(total_base, total_base,
+			total_size,
+			MT_MEMORY | MT_RW | MT_SECURE);
+
+	/* Re-map the code section */
+	VERBOSE("Code region: %p - %p\n",
+		(void *) code_start, (void *) code_limit);
+	mmap_add_region(code_start, code_start,
+			code_limit - code_start,
+			MT_CODE | MT_SECURE);
+
+	/* Re-map the read-only data section */
+	VERBOSE("Read-only data region: %p - %p\n",
+		(void *) rodata_start, (void *) rodata_limit);
+	mmap_add_region(rodata_start, rodata_start,
+			rodata_limit - rodata_start,
+			MT_RO_DATA | MT_SECURE);
+
+#if USE_COHERENT_MEM
+	/* Re-map the coherent memory region */
+	VERBOSE("Coherent region: %p - %p\n",
+		(void *) coh_start, (void *) coh_limit);
+	mmap_add_region(coh_start, coh_start,
+			coh_limit - coh_start,
+			MT_DEVICE | MT_RW | MT_SECURE);
+#endif
+
+	/* Now (re-)map the platform-specific memory regions */
+	mmap_add(plat_marvell_get_mmap());
+
+	/* Create the page tables to reflect the above mappings */
+	init_xlat_tables();
+}
+
+/*****************************************************************************
+ * Returns the entry point of the non-secure (BL33) image.
+ * Weak symbol: a specific platform may override it; the default is the
+ * build-time constant PLAT_MARVELL_NS_IMAGE_OFFSET.
+ *****************************************************************************
+ */
+unsigned long plat_get_ns_image_entrypoint(void)
+{
+	return PLAT_MARVELL_NS_IMAGE_OFFSET;
+}
+
+/*****************************************************************************
+ * Gets SPSR for BL32 entry
+ *****************************************************************************
+ */
+uint32_t marvell_get_spsr_for_bl32_entry(void)
+{
+	/*
+	 * The Secure Payload Dispatcher service is responsible for
+	 * setting the SPSR prior to entry into the BL32 image.
+	 * Hence a zero placeholder is returned here.
+	 */
+	return 0;
+}
+
+/*****************************************************************************
+ * Gets SPSR for BL33 entry
+ *****************************************************************************
+ */
+uint32_t marvell_get_spsr_for_bl33_entry(void)
+{
+	unsigned long el_status;
+	unsigned int mode;
+	uint32_t spsr;
+
+	/* Figure out what mode we enter the non-secure world in */
+	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+	el_status &= ID_AA64PFR0_ELX_MASK;
+
+	/* Enter BL33 at EL2 if EL2 is implemented, otherwise at EL1 */
+	mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+
+/*****************************************************************************
+ * Returns Marvell platform specific memory map regions.
+ * Weak symbol: a platform may supply its own mmap table instead of the
+ * common plat_marvell_mmap array.
+ *****************************************************************************
+ */
+const mmap_region_t *plat_marvell_get_mmap(void)
+{
+	return plat_marvell_mmap;
+}
+
diff --git a/plat/marvell/common/aarch64/marvell_helpers.S b/plat/marvell/common/aarch64/marvell_helpers.S
new file mode 100644
index 0000000..a3dc917
--- /dev/null
+++ b/plat/marvell/common/aarch64/marvell_helpers.S
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <asm_macros.S>
+#include <cortex_a72.h>
+#include <marvell_def.h>
+#include <platform_def.h>
+#ifndef PLAT_a3700
+#include <ccu.h>
+#include <cache_llc.h>
+#endif
+
+	.weak	plat_marvell_calc_core_pos
+	.weak	plat_my_core_pos
+	.globl	plat_crash_console_init
+	.globl	plat_crash_console_putc
+	.globl	platform_mem_init
+	.globl	disable_mmu_dcache
+	.globl	invalidate_tlb_all
+	.globl	platform_unmap_sram
+	.globl	disable_sram
+	.globl	disable_icache
+	.globl	invalidate_icache_all
+	.globl	marvell_exit_bootrom
+	.globl	ca72_l2_enable_unique_clean
+
+	/* -----------------------------------------------------
+	 *  unsigned int plat_my_core_pos(void)
+	 *  This function uses the plat_marvell_calc_core_pos()
+	 *  definition to get the index of the calling CPU.
+	 *  Clobber list : x0, x1
+	 * -----------------------------------------------------
+	 */
+func plat_my_core_pos
+	mrs	x0, mpidr_el1		/* x0 = this core's MPIDR */
+	b	plat_marvell_calc_core_pos	/* tail-call; index returned in x0 */
+endfunc plat_my_core_pos
+
+	/* -----------------------------------------------------
+	 *  unsigned int plat_marvell_calc_core_pos(uint64_t mpidr)
+	 *  Helper function to calculate the core position.
+	 *  With this function: CorePos = (ClusterId * 2) +
+	 *  				  CoreId
+	 *  The cluster field is shifted right by 7 (not 8) so
+	 *  that it contributes ClusterId * 2 to the sum.
+	 * -----------------------------------------------------
+	 */
+func plat_marvell_calc_core_pos
+	and	x1, x0, #MPIDR_CPU_MASK		/* x1 = CoreId */
+	and	x0, x0, #MPIDR_CLUSTER_MASK	/* x0 = cluster field */
+	add	x0, x1, x0, LSR #7		/* x0 = CoreId + ClusterId * 2 */
+	ret
+endfunc plat_marvell_calc_core_pos
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Tail-calls console_core_init with the crash
+	 * UART base, its input clock and the baudrate.
+	 * Clobber list : x0, x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_init
+	mov_imm	x0, PLAT_MARVELL_CRASH_UART_BASE
+	mov_imm	x1, PLAT_MARVELL_CRASH_UART_CLK_IN_HZ
+	mov_imm	x2, MARVELL_CONSOLE_BAUDRATE
+	b	console_core_init
+endfunc plat_crash_console_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_putc(int c)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * The character is already in x0 and is passed
+	 * through to console_core_putc via tail-call.
+	 * Clobber list : x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_putc
+	mov_imm	x1, PLAT_MARVELL_CRASH_UART_BASE
+	b	console_core_putc
+endfunc plat_crash_console_putc
+
+	/* ---------------------------------------------------------------------
+	 * We don't need to carry out any memory initialization on Marvell
+	 * platforms. The Secure RAM is accessible straight away.
+	 * ---------------------------------------------------------------------
+	 */
+func platform_mem_init
+	ret
+endfunc platform_mem_init
+
+	/* -----------------------------------------------------
+	 * void disable_mmu_dcache(void)
+	 * Disable the MMU and the L1/L2 data cache at EL3 by
+	 * clearing SCTLR_EL3.M and SCTLR_EL3.C (the icache is
+	 * NOT touched here - see disable_icache).
+	 * Clobber list : x0
+	 * -----------------------------------------------------
+	 */
+func disable_mmu_dcache
+	mrs	x0, sctlr_el3
+	bic	x0, x0, 0x1		/* M bit - MMU */
+	bic	x0, x0, 0x4		/* C bit - Dcache L1 & L2 */
+	msr	sctlr_el3, x0
+	isb				/* make the new translation regime visible */
+	ret
+endfunc disable_mmu_dcache
+
+	/* -----------------------------------------------------
+	 * Invalidate all EL3 TLB entries
+	 * Clobber list : none
+	 * -----------------------------------------------------
+	 */
+func invalidate_tlb_all
+	tlbi	alle3		/* invalidate all EL3 TLB entries */
+	dsb	sy		/* wait for the invalidation to complete */
+	isb
+	ret
+endfunc invalidate_tlb_all
+
+	/* -----------------------------------------------------
+	 * Disable the i cache by clearing SCTLR_EL3.I
+	 * Clobber list : x0
+	 * -----------------------------------------------------
+	 */
+func disable_icache
+	mrs 	x0, sctlr_el3
+	bic	x0, x0, 0x1000	/* I bit - Icache L1 & L2 */
+	msr	sctlr_el3, x0
+	isb
+	ret
+endfunc disable_icache
+
+	/* -----------------------------------------------------
+	 * Invalidate the whole icache, Inner Shareable, to the
+	 * Point of Unification
+	 * Clobber list : none
+	 * -----------------------------------------------------
+	 */
+func invalidate_icache_all
+	ic	ialluis
+	isb	sy
+	ret
+endfunc invalidate_icache_all
+
+	/* -----------------------------------------------------
+	 * Clear the SRAM enabling bit to unmap SRAM
+	 * Writes 0 to the CCU SRAM window control register.
+	 * Clobber list : x0
+	 * -----------------------------------------------------
+	 */
+func platform_unmap_sram
+	ldr	x0, =CCU_SRAM_WIN_CR
+	str	wzr, [x0]		/* disable the SRAM window */
+	ret
+endfunc platform_unmap_sram
+
+	/* -----------------------------------------------------
+	 * Disable the SRAM (carved out of the LLC):
+	 * release the line locks, invalidate every way, then
+	 * turn the LLC off.
+	 * Clobber list : x0, x1
+	 * -----------------------------------------------------
+	 */
+func disable_sram
+	/* Disable the line lockings. They must be disabled explicitly
+	 * or the OS will have problems using the cache */
+	ldr	x1, =MASTER_LLC_TC0_LOCK
+	str	wzr, [x1]
+
+	/* Invalidate all ways */
+	ldr	w1, =LLC_WAY_MASK
+	ldr	x0, =MASTER_L2X0_INV_WAY
+	str	w1, [x0]
+
+	/* Finally disable LLC */
+	ldr	x0, =MASTER_LLC_CTRL
+	str	wzr, [x0]
+
+	ret
+endfunc disable_sram
+
+	/* -----------------------------------------------------
+	 * Operation when exit bootROM:
+	 * Disable the MMU
+	 * Disable and invalidate the dcache
+	 * Unmap and disable the SRAM
+	 * Disable and invalidate the icache
+	 * In:  x0 = address to jump to once the caches/MMU are
+	 *      torn down (the system restore address)
+	 * Does not return; branches to the saved address.
+	 * -----------------------------------------------------
+	 */
+func marvell_exit_bootrom
+	/* Save the system restore address in a callee-saved
+	 * register so it survives the helper calls below */
+	mov	x28, x0
+
+	/* Close the caches and MMU */
+	bl	disable_mmu_dcache
+
+	/*
+	 * There is nothing important in the caches now,
+	 * so invalidate them instead of cleaning.
+	 * inv_dcache_range takes x0 = base, x1 = size.
+	 */
+	adr	x0, __RW_START__
+	adr	x1, __RW_END__
+	sub	x1, x1, x0
+	bl	inv_dcache_range
+	bl	invalidate_tlb_all
+
+	/*
+	 * Clean the memory mapping of SRAM
+	 * the DDR mapping will remain to enable boot image to execute
+	 */
+	bl	platform_unmap_sram
+
+	/* Disable the SRAM */
+	bl	disable_sram
+
+	/* Disable and invalidate icache */
+	bl	disable_icache
+	bl	invalidate_icache_all
+
+	/* Jump to the saved restore address; never returns */
+	mov	x0, x28
+	br	x0
+endfunc marvell_exit_bootrom
+
+	/*
+	 * Enable L2 UniqueClean evictions with data
+	 * Sets the corresponding bit in the Cortex-A72
+	 * L2ACTLR_EL1 implementation-defined register.
+	 * NOTE(review): must only be called on Cortex-A72
+	 * cores - the register is A72-specific. Clobbers x0.
+	 */
+func ca72_l2_enable_unique_clean
+
+	mrs	x0, CORTEX_A72_L2ACTLR_EL1
+	orr	x0, x0, #CORTEX_A72_L2ACTLR_ENABLE_UNIQUE_CLEAN
+	msr	CORTEX_A72_L2ACTLR_EL1, x0
+
+	ret
+endfunc ca72_l2_enable_unique_clean