sc7180 platform support

Add support for the QTI SC7180 chip to Trusted Firmware-A (ATF).

Change-Id: I0d82d3a378036003fbd0bc4784f61464bb76ea82
Signed-off-by: Saurabh Gorecha <sgorecha@codeaurora.org>
Co-authored-by: Maulik Shah <mkshah@codeaurora.org>
diff --git a/plat/qti/common/src/aarch64/qti_helpers.S b/plat/qti/common/src/aarch64/qti_helpers.S
new file mode 100644
index 0000000..c1ea7b3
--- /dev/null
+++ b/plat/qti/common/src/aarch64/qti_helpers.S
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018,2020, The Linux Foundation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <drivers/arm/gicv2.h>
+#include <drivers/arm/gicv3.h>
+#include <drivers/console.h>
+
+#include <platform_def.h>
+
+	.globl	plat_my_core_pos
+	.globl	plat_qti_core_pos_by_mpidr
+	.globl	plat_reset_handler
+	.globl	plat_panic_handler
+
+	/* -----------------------------------------------------
+	 *  unsigned int plat_qti_core_pos_by_mpidr(uint64_t mpidr)
+	 *  Helper function to calculate the core position from the
+	 *  mpidr value passed in x0 (not the calling CPU's MPIDR).
+	 *  With this function:
+	 *  CorePos = (ClusterId * 4) + CoreId
+	 *  - In ARM v8   (MPIDR_EL1[24]=0)
+	 *    ClusterId = MPIDR_EL1[15:8]
+	 *    CoreId    = MPIDR_EL1[7:0]
+	 *  - In ARM v8.1 (MPIDR_EL1[24]=1)
+	 *    ClusterId = MPIDR_EL1[23:16]
+	 *    CoreId    = MPIDR_EL1[15:8]
+	 *  Clobbers: x0 & x1.
+	 * -----------------------------------------------------
+	 */
+func plat_qti_core_pos_by_mpidr
+	/* Test the MT bit of the mpidr ARGUMENT (x0), not the calling
+	 * core's MPIDR_EL1: callers (e.g. the GIC driver) pass arbitrary
+	 * core mpidr values through this function. */
+	tst	x0, #MPIDR_MT_MASK
+	beq	plat_qti_core_pos_by_mpidr_no_mt
+	/* Right shift mpidr by one affinity level when MT=1. */
+	lsr	x0, x0, #MPIDR_AFFINITY_BITS
+plat_qti_core_pos_by_mpidr_no_mt:
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #6	/* pos = (cluster * 4) + core */
+	ret
+endfunc plat_qti_core_pos_by_mpidr
+
+	/* --------------------------------------------------------------------
+	 * void plat_panic_handler(void)
+	 * calls SDI and reset system. Does not return.
+	 * --------------------------------------------------------------------
+	 */
+func plat_panic_handler
+	/* Switch to SP_EL0 and re-establish this core's stack; the panic
+	 * may be raised with SP in an unknown state. */
+	msr	spsel, #0
+	bl	plat_set_my_stack
+	/* Tail-call into qtiseclib; execution does not come back here. */
+	b	qtiseclib_panic
+endfunc plat_panic_handler
+
+	/* -----------------------------------------------------
+	 *  unsigned int plat_my_core_pos(void)
+	 *  This function uses the plat_qti_core_pos_by_mpidr()
+	 *  definition to get the index of the calling CPU
+	 *  Clobbers: x0 & x1.
+	 * -----------------------------------------------------
+	 */
+func plat_my_core_pos
+	mrs	x0, mpidr_el1
+	/* Tail-call: the core position is returned directly in x0. */
+	b	plat_qti_core_pos_by_mpidr
+endfunc plat_my_core_pos
+
+func plat_reset_handler
+	/* save the lr. NOTE(review): assumes x18 survives the
+	 * spin_lock/qtiseclib calls below - confirm against qtiseclib. */
+	mov	x18, x30
+
+	/* Serialize CPUSS boot setup. Multi core enter simultaneously. */
+	ldr	x0, =g_qti_cpuss_boot_lock
+	bl	spin_lock
+
+	/* pass cold boot status (PC-relative literal load). */
+	ldr	w0, g_qti_bl31_cold_booted
+	/* Execute CPUSS boot set up on every core. */
+	bl	qtiseclib_cpuss_reset_asm
+
+	ldr	x0, =g_qti_cpuss_boot_lock
+	bl	spin_unlock
+
+	ret	x18
+endfunc plat_reset_handler
diff --git a/plat/qti/common/src/aarch64/qti_kryo4_gold.S b/plat/qti/common/src/aarch64/qti_kryo4_gold.S
new file mode 100644
index 0000000..a1b40c8
--- /dev/null
+++ b/plat/qti/common/src/aarch64/qti_kryo4_gold.S
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+
+#include <plat_macros.S>
+#include <qti_cpu.h>
+
+	.p2align 3
+
+/* -------------------------------------------------
+ * The CPU Ops reset function for Kryo-4 Gold
+ * -------------------------------------------------
+ */
+func qti_kryo4_gold_reset_func
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+	/* Install the CVE-2017-5715 (Spectre v2) BPIALL workaround
+	 * vector table before anything else runs on this core. */
+	adr	x0, wa_cve_2017_5715_bpiall_vbar
+	msr	vbar_el3, x0
+	isb
+#endif
+
+	mov	x19, x30	/* preserve lr across the call below */
+
+	bl	qtiseclib_kryo4_gold_reset_asm
+
+	ret	x19
+
+endfunc qti_kryo4_gold_reset_func
+
+/* ----------------------------------------------------
+ * The CPU Ops core power down function for Kryo-4 Gold
+ * ----------------------------------------------------
+ */
+func qti_kryo4_gold_core_pwr_dwn
+	/* Intentionally empty. NOTE(review): presumably power-down
+	 * sequencing is done in qtiseclib/PSCI paths - confirm. */
+	ret
+endfunc qti_kryo4_gold_core_pwr_dwn
+
+/* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Kryo-4 Gold
+ * -------------------------------------------------------
+ */
+func qti_kryo4_gold_cluster_pwr_dwn
+	/* Intentionally empty. NOTE(review): presumably handled
+	 * outside the CPU Ops hook - confirm. */
+	ret
+endfunc qti_kryo4_gold_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Kryo4 Gold. Must follow AAPCS.
+ */
+func qti_kryo4_gold_errata_report
+	/* TODO : Need to add support. Required only for debug bl31 image.*/
+	ret
+endfunc qti_kryo4_gold_errata_report
+#endif
+
+/* ---------------------------------------------
+ * This function provides kryo4_gold specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ASCII and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.qti_kryo4_gold_regs, "aS"
+qti_kryo4_gold_regs:  /* The ASCII list of register names to be reported */
+	.asciz	""	/* empty list: no extra registers reported */
+
+func qti_kryo4_gold_cpu_reg_dump
+	adr	x6, qti_kryo4_gold_regs
+	ret
+endfunc qti_kryo4_gold_cpu_reg_dump
+
+/* Register the Kryo-4 Gold CPU ops with the generic CPU framework. */
+declare_cpu_ops	qti_kryo4_gold, QTI_KRYO4_GOLD_MIDR,	\
+		qti_kryo4_gold_reset_func,		\
+		qti_kryo4_gold_core_pwr_dwn,	\
+		qti_kryo4_gold_cluster_pwr_dwn
diff --git a/plat/qti/common/src/aarch64/qti_kryo4_silver.S b/plat/qti/common/src/aarch64/qti_kryo4_silver.S
new file mode 100644
index 0000000..183eeb0
--- /dev/null
+++ b/plat/qti/common/src/aarch64/qti_kryo4_silver.S
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+
+#include <plat_macros.S>
+#include <qti_cpu.h>
+
+	.p2align 3
+
+/* -------------------------------------------------
+ * The CPU Ops reset function for Kryo-4 Silver
+ * -------------------------------------------------
+ */
+func qti_kryo4_silver_reset_func
+	mov	x19, x30	/* preserve lr across the call below */
+
+	bl	qtiseclib_kryo4_silver_reset_asm
+
+	ret	x19
+
+endfunc qti_kryo4_silver_reset_func
+
+/* ------------------------------------------------------
+ * The CPU Ops core power down function for Kryo-4 Silver
+ * ------------------------------------------------------
+ */
+func qti_kryo4_silver_core_pwr_dwn
+	/* Intentionally empty. NOTE(review): presumably power-down
+	 * sequencing is done in qtiseclib/PSCI paths - confirm. */
+	ret
+endfunc qti_kryo4_silver_core_pwr_dwn
+
+/* ---------------------------------------------------------
+ * The CPU Ops cluster power down function for Kryo-4 Silver
+ * ---------------------------------------------------------
+ */
+func qti_kryo4_silver_cluster_pwr_dwn
+	/* Intentionally empty. NOTE(review): presumably handled
+	 * outside the CPU Ops hook - confirm. */
+	ret
+endfunc qti_kryo4_silver_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Kryo4 Silver. Must follow AAPCS.
+ */
+func qti_kryo4_silver_errata_report
+	/* TODO : Need to add support. Required only for debug bl31 image.*/
+	ret
+endfunc qti_kryo4_silver_errata_report
+#endif
+
+
+/* ---------------------------------------------
+ * This function provides kryo4_silver specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ASCII and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.qti_kryo4_silver_regs, "aS"
+qti_kryo4_silver_regs:  /* The ASCII list of register names to be reported */
+	.asciz	""	/* empty list: no extra registers reported */
+
+func qti_kryo4_silver_cpu_reg_dump
+	adr	x6, qti_kryo4_silver_regs
+	ret
+endfunc qti_kryo4_silver_cpu_reg_dump
+
+
+/* Register the Kryo-4 Silver CPU ops with the generic CPU framework. */
+declare_cpu_ops	qti_kryo4_silver, QTI_KRYO4_SILVER_MIDR,	\
+		qti_kryo4_silver_reset_func,		\
+		qti_kryo4_silver_core_pwr_dwn,		\
+		qti_kryo4_silver_cluster_pwr_dwn
diff --git a/plat/qti/common/src/aarch64/qti_uart_console.S b/plat/qti/common/src/aarch64/qti_uart_console.S
new file mode 100644
index 0000000..2eb33d9
--- /dev/null
+++ b/plat/qti/common/src/aarch64/qti_uart_console.S
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <console_macros.S>
+
+#include <platform_def.h>
+#include <qti_uart_console.h>
+
+/*
+ * This driver implements console output over the QTI GENI-based UART
+ * (it programs GENI command/status/FIFO registers; there is no ring buffer).
+ */
+
+	.globl qti_console_uart_register
+
+	/* -----------------------------------------------
+	 * int qti_console_uart_register(console_t *console,
+	 *				 uintptr_t uart_base_addr)
+	 * Registers uart console instance.
+	 * In:  x0 - pointer to empty console_t struct
+	 *      x1 - start address of uart block.
+	 * Out: x0 - 1 to indicate success
+	 * Clobber list: x0, x1, x14
+	 * -----------------------------------------------
+	 */
+func qti_console_uart_register
+	str	x1, [x0, #CONSOLE_T_BASE]	/* Save UART base. */
+	/* Hook console_uart_putc/console_uart_flush into the console. */
+	finish_console_register uart putc=1, flush=1
+endfunc qti_console_uart_register
+
+	/* -----------------------------------------------
+	 * int console_uart_putc(int c, console_t *console)
+	 * Writes a character to the UART console.
+	 * The character must be preserved in x0.
+	 * In: x0 - character to be stored
+	 *     x1 - pointer to console_t struct
+	 * Clobber list: x1, x2
+	 * -----------------------------------------------
+	 */
+func console_uart_putc
+	/* set x1 = UART base. */
+	ldr	x1, [x1, #CONSOLE_T_BASE]
+
+	/* Loop until M_GENI_CMD_ACTIVE bit not clear. */
+1:	ldr	w2, [x1, #GENI_STATUS_REG]
+	and	w2, w2, #GENI_STATUS_M_GENI_CMD_ACTIVE_MASK
+	cmp	w2, #GENI_STATUS_M_GENI_CMD_ACTIVE_MASK
+	b.eq	1b
+
+	/* Transmit data. */
+	cmp	w0, #0xA
+	b.ne	3f
+
+	/* Add '\r' when input char is '\n': send 0xD first, then fall
+	 * through to send the original 0xA below. */
+	mov	w2, #0x1
+	mov	w0, #0xD
+	str	w2, [x1, #UART_TX_TRANS_LEN_REG]
+	mov	w2, #GENI_M_CMD_TX
+	str	w2, [x1, #GENI_M_CMD0_REG]
+	str	w0, [x1, #GENI_TX_FIFOn_REG]
+	mov	w0, #0xA
+
+	/* Loop until M_GENI_CMD_ACTIVE bit not clear. */
+2:	ldr	w2, [x1, #GENI_STATUS_REG]
+	and	w2, w2, #GENI_STATUS_M_GENI_CMD_ACTIVE_MASK
+	cmp	w2, #GENI_STATUS_M_GENI_CMD_ACTIVE_MASK
+	b.eq	2b
+
+	/* Transmit i/p data: 1-byte TX command, then write the FIFO. */
+3:	mov	w2, #0x1
+	str	w2, [x1, #UART_TX_TRANS_LEN_REG]
+	mov	w2, #GENI_M_CMD_TX
+	str	w2, [x1, #GENI_M_CMD0_REG]
+	str	w0, [x1, #GENI_TX_FIFOn_REG]
+
+	ret
+endfunc	console_uart_putc
+
+	/* -----------------------------------------------
+	 * int console_uart_flush(console_t *console)
+	 * Waits until any in-flight TX command completes.
+	 * In:  x0 - pointer to console_t struct
+	 * Out: x0 - 0 for success
+	 * Clobber list: x0, x1
+	 * -----------------------------------------------
+	 */
+func console_uart_flush
+	/* set x0 = UART base. */
+	ldr	x0, [x0, #CONSOLE_T_BASE]
+
+	/* Loop until M_GENI_CMD_ACTIVE bit not clear. */
+1:	ldr	w1, [x0, #GENI_STATUS_REG]
+	and	w1, w1, #GENI_STATUS_M_GENI_CMD_ACTIVE_MASK
+	cmp	w1, #GENI_STATUS_M_GENI_CMD_ACTIVE_MASK
+	b.eq	1b
+
+	mov	w0, #0
+	ret
+endfunc console_uart_flush
diff --git a/plat/qti/common/src/qti_bl31_setup.c b/plat/qti/common/src/qti_bl31_setup.c
new file mode 100644
index 0000000..b2bc543
--- /dev/null
+++ b/plat/qti/common/src/qti_bl31_setup.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <bl31/bl31.h>
+#include <common/debug.h>
+#include <common/desc_image_load.h>
+#include <drivers/console.h>
+#include <drivers/generic_delay_timer.h>
+#include <lib/bl_aux_params/bl_aux_params.h>
+#include <lib/coreboot.h>
+#include <lib/spinlock.h>
+
+#include <platform.h>
+#include <qti_interrupt_svc.h>
+#include <qti_plat.h>
+#include <qti_uart_console.h>
+#include <qtiseclib_interface.h>
+
+/*
+ * Placeholder variables for copying the arguments that have been passed to
+ * BL31 from BL2.
+ */
+static entry_point_info_t bl33_image_ep_info;
+
+/*
+ * Variable to hold counter frequency for the CPU's generic timer. In this
+ * platform coreboot image configure counter frequency for boot core before
+ * reaching TF-A.
+ */
+static uint64_t g_qti_cpu_cntfrq;
+
+/*
+ * Lock variable to serialize cpuss reset execution.
+ */
+spinlock_t g_qti_cpuss_boot_lock __attribute__ ((section("tzfw_coherent_mem"),
+		    aligned(CACHE_WRITEBACK_GRANULE))) = {0x0};
+
+/*
+ * Variable to hold bl31 cold boot status. Default value 0x0 means yet to boot.
+ * Any other value means cold booted.
+ */
+uint32_t g_qti_bl31_cold_booted __attribute__ ((section("tzfw_coherent_mem"))) = 0x0;
+
+/*******************************************************************************
+ * Perform any BL31 early platform setup common to ARM standard platforms.
+ * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
+ * in BL2 & S-EL3 in BL1) before they are lost (potentially). This needs to be
+ * done before the MMU is initialized so that the memory layout can be used
+ * while creating page tables. BL2 has flushed this information to memory, so
+ * we are guaranteed to pick up good data.
+ ******************************************************************************/
+void bl31_early_platform_setup(u_register_t from_bl2,
+			       u_register_t plat_params_from_bl2)
+{
+
+	/* Snapshot CNTFRQ_EL0 (programmed before TF-A entry); reported
+	 * later through plat_get_syscnt_freq2(). */
+	g_qti_cpu_cntfrq = read_cntfrq_el0();
+
+	bl_aux_params_parse(plat_params_from_bl2, NULL);
+
+#if COREBOOT
+	/* Register the UART console only when coreboot supplied a base. */
+	if (coreboot_serial.baseaddr != 0) {
+		static console_t g_qti_console_uart;
+
+		qti_console_uart_register(&g_qti_console_uart,
+					  coreboot_serial.baseaddr);
+	}
+#endif
+
+	/*
+	 * Tell BL31 where the non-trusted software image
+	 * is located and the entry state information
+	 */
+	bl31_params_parse_helper(from_bl2, NULL, &bl33_image_ep_info);
+}
+
+/* Generic BL31 entry point: forwards arg0/arg1 to
+ * bl31_early_platform_setup(); arg2/arg3 are unused on this platform. */
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+				u_register_t arg2, u_register_t arg3)
+{
+	bl31_early_platform_setup(arg0, arg1);
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only intializes the mmu in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+	/* Map everything from BL code start to coherent RAM end, then
+	 * tighten attributes for code/RO-data/coherent sub-regions. */
+	qti_setup_page_tables(BL_CODE_BASE,
+			      BL_COHERENT_RAM_END - BL_CODE_BASE,
+			      BL_CODE_BASE,
+			      BL_CODE_END,
+			      BL_RO_DATA_BASE,
+			      BL_RO_DATA_END,
+			      BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END);
+	enable_mmu_el3(0);
+}
+
+/*******************************************************************************
+ * Perform any BL31 platform setup common to ARM standard platforms
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+	generic_delay_timer_init();
+	/* Initialize the GIC driver, CPU and distributor interfaces */
+	plat_qti_gic_driver_init();
+	plat_qti_gic_init();
+	qti_interrupt_svc_init();
+	qtiseclib_bl31_platform_setup();
+
+	/* set boot state to cold boot complete (read by plat_reset_handler
+	 * on subsequent warm boots). */
+	g_qti_bl31_cold_booted = 0x1;
+}
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for the
+ * security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ ******************************************************************************/
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	/* QTI platform don't have BL32 implementation. */
+	assert(type == NON_SECURE);
+	assert(bl33_image_ep_info.h.type == PARAM_EP);
+	assert(bl33_image_ep_info.h.attr == NON_SECURE);
+	/*
+	 * None of the images on the platforms can have 0x0
+	 * as the entrypoint.
+	 */
+	if (bl33_image_ep_info.pc) {
+		return &bl33_image_ep_info;
+	} else {
+		return NULL;
+	}
+}
+
+/*******************************************************************************
+ * This function is used by the architecture setup code to retrieve the counter
+ * frequency for the CPU's generic timer. This value will be programmed into the
+ * CNTFRQ_EL0 register. In Arm standard platforms, it returns the base frequency
+ * of the system counter, which is retrieved from the first entry in the
+ * frequency modes table. This will be used later in warm boot (psci_arch_setup)
+ * of CPUs to set when CPU frequency.
+ ******************************************************************************/
+unsigned int plat_get_syscnt_freq2(void)
+{
+	assert(g_qti_cpu_cntfrq != 0);
+	/* NOTE(review): narrows uint64_t to unsigned int - assumes the
+	 * counter frequency fits in 32 bits; confirm for this SoC. */
+	return g_qti_cpu_cntfrq;
+}
diff --git a/plat/qti/common/src/qti_common.c b/plat/qti/common/src/qti_common.c
new file mode 100644
index 0000000..ff0fa30
--- /dev/null
+++ b/plat/qti/common/src/qti_common.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <common/debug.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <platform_def.h>
+#include <qti_plat.h>
+#include <qtiseclib_interface.h>
+
+/*
+ * Table of regions for various BL stages to map using the MMU.
+ * This doesn't include TZRAM as the 'mem_layout' argument passed to
+ * qti_configure_mmu_elx() will give the available subset of that.
+ */
+
+const mmap_region_t plat_qti_mmap[] = {
+	MAP_REGION_FLAT(QTI_DEVICE_BASE, QTI_DEVICE_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	/* AOP command DB: non-secure, read-only, never executed. */
+	MAP_REGION_FLAT(QTI_AOP_CMD_DB_BASE, QTI_AOP_CMD_DB_SIZE,
+			MT_NS | MT_RO | MT_EXECUTE_NEVER),
+	{0}
+};
+
+/* Ensure the static region list fits the xlat library's table limit. */
+CASSERT(ARRAY_SIZE(plat_qti_mmap) <= MAX_MMAP_REGIONS, assert_max_mmap_regions);
+
+
+/* Return true when [addr, addr + size) wraps around or intersects the
+ * TF-A (BL31) image range [BL31_BASE, BL31_LIMIT). */
+bool qti_is_overlap_atf_rg(unsigned long long addr, size_t size)
+{
+	unsigned long long end = addr + size;
+
+	/* A wrapped end address is treated as an overlap. */
+	if (end < addr) {
+		return true;
+	}
+
+	return (end > BL31_BASE) && (addr < BL31_LIMIT);
+}
+
+/*
+ *  unsigned int plat_qti_my_cluster_pos(void)
+ *  definition to get the cluster index of the calling CPU.
+ *  - In ARM v8   (MPIDR_EL1[24]=0)
+ *    ClusterId = MPIDR_EL1[15:8]
+ *  - In ARM v8.1 & Later version (MPIDR_EL1[24]=1)
+ *    ClusterId = MPIDR_EL1[23:16]
+ */
+unsigned int plat_qti_my_cluster_pos(void)
+{
+	unsigned int mpidr, cluster_id;
+
+	mpidr = read_mpidr_el1();
+	if ((mpidr & MPIDR_MT_MASK) == 0) {	/* MT not supported */
+		cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	} else {		/* MT supported */
+		cluster_id = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
+	}
+	assert(cluster_id < PLAT_CLUSTER_COUNT);
+	return cluster_id;
+}
+
+/*
+ * Set up the page tables for the generic and platform-specific memory regions.
+ * The extents of the generic memory regions are specified by the function
+ * arguments and consist of:
+ * - Trusted SRAM seen by the BL image;
+ * - Code section;
+ * - Read-only data section;
+ * - Coherent memory region, if applicable.
+ */
+void qti_setup_page_tables(uintptr_t total_base,
+			   size_t total_size,
+			   uintptr_t code_start,
+			   uintptr_t code_limit,
+			   uintptr_t rodata_start,
+			   uintptr_t rodata_limit,
+			   uintptr_t coh_start, uintptr_t coh_limit)
+{
+	/*
+	 * Map the Trusted SRAM with appropriate memory attributes.
+	 * Subsequent mappings will adjust the attributes for specific regions.
+	 */
+	VERBOSE("Trusted SRAM seen by this BL image: %p - %p\n",
+		(void *)total_base, (void *)(total_base + total_size));
+	mmap_add_region(total_base, total_base,
+			total_size, MT_MEMORY | MT_RW | MT_SECURE);
+
+	/* Re-map the code section */
+	VERBOSE("Code region: %p - %p\n",
+		(void *)code_start, (void *)code_limit);
+	mmap_add_region(code_start, code_start,
+			code_limit - code_start, MT_CODE | MT_SECURE);
+
+	/* Re-map the read-only data section */
+	VERBOSE("Read-only data region: %p - %p\n",
+		(void *)rodata_start, (void *)rodata_limit);
+	mmap_add_region(rodata_start, rodata_start,
+			rodata_limit - rodata_start, MT_RO_DATA | MT_SECURE);
+
+	/* Re-map the coherent memory region */
+	VERBOSE("Coherent region: %p - %p\n",
+		(void *)coh_start, (void *)coh_limit);
+	mmap_add_region(coh_start, coh_start,
+			coh_limit - coh_start, MT_DEVICE | MT_RW | MT_SECURE);
+
+	/* Now (re-)map the platform-specific memory regions */
+	mmap_add(plat_qti_mmap);
+
+	/* Create the page tables to reflect the above mappings */
+	init_xlat_tables();
+}
+
+/* Expand [addr, addr + size) outward to whole-page boundaries and return
+ * the page-aligned base and size via the output pointers. */
+static inline void qti_align_mem_region(uintptr_t addr, size_t size,
+					uintptr_t *aligned_addr,
+					size_t *aligned_size)
+{
+	uintptr_t page_base = round_down(addr, PAGE_SIZE);
+	size_t head = addr - page_base;	/* bytes before addr in first page */
+
+	*aligned_addr = page_base;
+	*aligned_size = round_up(head + size, PAGE_SIZE);
+}
+
+/* Add a page-aligned, identity-mapped dynamic region covering
+ * [base_pa, base_pa + size). Returns -EPERM when the requested range
+ * overlaps TF-A, else the mmap_add_dynamic_region() result. */
+int qti_mmap_add_dynamic_region(uintptr_t base_pa, size_t size,
+				unsigned int attr)
+{
+	uintptr_t aligned_pa;
+	size_t aligned_size;
+
+	qti_align_mem_region(base_pa, size, &aligned_pa, &aligned_size);
+
+	/* NOTE(review): overlap is checked on the unaligned extents; the
+	 * aligned region could still touch TF-A if its bounds are not
+	 * page aligned - confirm BL31_BASE/BL31_LIMIT alignment. */
+	if (qti_is_overlap_atf_rg(base_pa, size)) {
+		/* Memory shouldn't overlap with TF-A range. */
+		return -EPERM;
+	}
+
+	return mmap_add_dynamic_region(aligned_pa, aligned_pa, aligned_size,
+				       attr);
+}
+
+/* Remove a dynamic mapping previously added via
+ * qti_mmap_add_dynamic_region(); re-applies the same page alignment so
+ * the removal range matches what was mapped. */
+int qti_mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
+{
+	qti_align_mem_region(base_va, size, &base_va, &size);
+	return mmap_remove_dynamic_region(base_va, size);
+}
diff --git a/plat/qti/common/src/qti_gic_v3.c b/plat/qti/common/src/qti_gic_v3.c
new file mode 100644
index 0000000..a5e0ae7
--- /dev/null
+++ b/plat/qti/common/src/qti_gic_v3.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/bl_common.h>
+#include <drivers/arm/gicv3.h>
+
+#include <platform.h>
+#include <platform_def.h>
+#include <qti_plat.h>
+#include <qtiseclib_defs.h>
+#include <qtiseclib_defs_plat.h>
+
+/* The GICv3 driver only needs to be initialized in EL3 */
+/* Per-CPU redistributor base addresses, filled in by the GICv3 driver. */
+static uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT];
+
+/* Array of interrupts to be configured by the gic driver */
+/* All entries are Group 0 (EL3) at the highest secure priority. */
+static const interrupt_prop_t qti_interrupt_props[] = {
+	INTR_PROP_DESC(QTISECLIB_INT_ID_CPU_WAKEUP_SGI,
+		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_RESET_SGI, GIC_HIGHEST_SEC_PRIORITY,
+		       INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_SEC_WDOG_BARK, GIC_HIGHEST_SEC_PRIORITY,
+		       INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_NON_SEC_WDOG_BITE,
+		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+		       GIC_INTR_CFG_LEVEL),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_VMIDMT_ERR_CLT_SEC,
+		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_VMIDMT_ERR_CLT_NONSEC,
+		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_VMIDMT_ERR_CFG_SEC,
+		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_VMIDMT_ERR_CFG_NONSEC,
+		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_XPU_SEC, GIC_HIGHEST_SEC_PRIORITY,
+		       INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_XPU_NON_SEC, GIC_HIGHEST_SEC_PRIORITY,
+		       INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+#ifdef QTISECLIB_INT_ID_A1_NOC_ERROR
+	INTR_PROP_DESC(QTISECLIB_INT_ID_A1_NOC_ERROR, GIC_HIGHEST_SEC_PRIORITY,
+		       INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+#endif
+	INTR_PROP_DESC(QTISECLIB_INT_ID_A2_NOC_ERROR, GIC_HIGHEST_SEC_PRIORITY,
+		       INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_CONFIG_NOC_ERROR,
+		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_DC_NOC_ERROR, GIC_HIGHEST_SEC_PRIORITY,
+		       INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_MEM_NOC_ERROR, GIC_HIGHEST_SEC_PRIORITY,
+		       INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_SYSTEM_NOC_ERROR,
+		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(QTISECLIB_INT_ID_MMSS_NOC_ERROR,
+		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+		       GIC_INTR_CFG_EDGE),
+};
+
+/* GICv3 driver configuration: base addresses, secure interrupt list and
+ * the mpidr -> core-position mapping used for redistributor lookup. */
+const gicv3_driver_data_t qti_gic_data = {
+	.gicd_base = QTI_GICD_BASE,
+	.gicr_base = QTI_GICR_BASE,
+	.interrupt_props = qti_interrupt_props,
+	.interrupt_props_num = ARRAY_SIZE(qti_interrupt_props),
+	.rdistif_num = PLATFORM_CORE_COUNT,
+	.rdistif_base_addrs = rdistif_base_addrs,
+	.mpidr_to_core_pos = plat_qti_core_pos_by_mpidr
+};
+
+void plat_qti_gic_driver_init(void)
+{
+	/*
+	 * The GICv3 driver is initialized in EL3 and does not need
+	 * to be initialized again in SEL1. This is because the S-EL1
+	 * can use GIC system registers to manage interrupts and does
+	 * not need GIC interface base addresses to be configured.
+	 */
+	gicv3_driver_init(&qti_gic_data);
+}
+
+/******************************************************************************
+ * ARM common helper to initialize the GIC. Only invoked by BL31
+ *****************************************************************************/
+void plat_qti_gic_init(void)
+{
+	unsigned int i;
+
+	gicv3_distif_init();
+	gicv3_rdistif_init(plat_my_core_pos());
+	gicv3_cpuif_enable(plat_my_core_pos());
+
+	/* Route secure spi interrupt to ANY (1-of-N delivery). */
+	for (i = 0; i < ARRAY_SIZE(qti_interrupt_props); i++) {
+		unsigned int int_id = qti_interrupt_props[i].intr_num;
+
+		if (plat_ic_is_spi(int_id)) {
+			gicv3_set_spi_routing(int_id, GICV3_IRM_ANY, 0x0);
+		}
+	}
+}
+
+/* Thin public wrapper so callers outside this file can set SPI routing
+ * without including the GICv3 driver header. */
+void gic_set_spi_routing(unsigned int id, unsigned int irm, u_register_t target)
+{
+	gicv3_set_spi_routing(id, irm, target);
+}
+
+/******************************************************************************
+ * ARM common helper to enable the GIC CPU interface for the calling core
+ *****************************************************************************/
+void plat_qti_gic_cpuif_enable(void)
+{
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * ARM common helper to disable the GIC CPU interface for the calling core
+ *****************************************************************************/
+void plat_qti_gic_cpuif_disable(void)
+{
+	gicv3_cpuif_disable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * ARM common helper to initialize the per-CPU redistributor interface in GICv3
+ *****************************************************************************/
+void plat_qti_gic_pcpu_init(void)
+{
+	gicv3_rdistif_init(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * ARM common helpers to power the calling core's GIC redistributor interface
+ *****************************************************************************/
+void plat_qti_gic_redistif_on(void)
+{
+	gicv3_rdistif_on(plat_my_core_pos());
+}
+
+void plat_qti_gic_redistif_off(void)
+{
+	gicv3_rdistif_off(plat_my_core_pos());
+}
diff --git a/plat/qti/common/src/qti_interrupt_svc.c b/plat/qti/common/src/qti_interrupt_svc.c
new file mode 100644
index 0000000..89cd7b5
--- /dev/null
+++ b/plat/qti/common/src/qti_interrupt_svc.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018,2020, The Linux Foundation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <bl31/interrupt_mgmt.h>
+#include <drivers/arm/gic_common.h>
+#include <lib/el3_runtime/context_mgmt.h>
+
+#include <platform.h>
+#include <qti_interrupt_svc.h>
+#include <qtiseclib_interface.h>
+
+#define QTI_INTR_INVALID_INT_NUM		0xFFFFFFFFU
+
+/*
+ * Top-level EL3 interrupt handler.
+ * Acknowledges the interrupt, dispatches it to qtiseclib, and signals EOI
+ * for valid INTIDs. Returns the (unmodified) context handle.
+ */
+static uint64_t qti_el3_interrupt_handler(uint32_t id, uint32_t flags,
+					  void *handle, void *cookie)
+{
+	uint32_t irq = QTI_INTR_INVALID_INT_NUM;
+
+	/*
+	 * EL3 non-interruptible. Interrupt shouldn't occur when we are at
+	 * EL3 / Secure.
+	 */
+	assert(handle != cm_get_context(SECURE));
+
+	irq = plat_ic_acknowledge_interrupt();
+
+	qtiseclib_invoke_isr(irq, handle);
+
+	/* End of Interrupt. INTIDs 1020-1023 are special and get no EOI. */
+	if (irq < 1022U) {
+		plat_ic_end_of_interrupt(irq);
+	}
+
+	return (uint64_t) handle;
+}
+
+/* Register qti_el3_interrupt_handler for EL3 interrupts raised while in
+ * non-secure state. Returns 0 on success. */
+int qti_interrupt_svc_init(void)
+{
+	int ret;
+	uint64_t flags = 0U;
+
+	/*
+	 * Route EL3 interrupts to EL3 when in Non-secure.
+	 * Note: EL3 won't have interrupt enable
+	 * & we don't have S-EL1 support.
+	 */
+	set_interrupt_rm_flag(flags, NON_SECURE);
+
+	/* Register handler for EL3 interrupts */
+	ret = register_interrupt_type_handler(INTR_TYPE_EL3,
+					      qti_el3_interrupt_handler, flags);
+	assert(ret == 0);
+
+	return ret;
+}
diff --git a/plat/qti/common/src/qti_pm.c b/plat/qti/common/src/qti_pm.c
new file mode 100644
index 0000000..4a5877c
--- /dev/null
+++ b/plat/qti/common/src/qti_pm.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <common/debug.h>
+#include <lib/psci/psci.h>
+
+#include <platform.h>
+#include <platform_def.h>
+#include <qti_cpu.h>
+#include <qti_plat.h>
+#include <qtiseclib_cb_interface.h>
+#include <qtiseclib_defs_plat.h>
+#include <qtiseclib_interface.h>
+
+#define QTI_LOCAL_PSTATE_WIDTH		4
+#define QTI_LOCAL_PSTATE_MASK		((1 << QTI_LOCAL_PSTATE_WIDTH) - 1)
+
+/* Make composite power state parameter till level 0 */
+#define qti_make_pwrstate_lvl0(lvl0_state, type) \
+		(((lvl0_state) << PSTATE_ID_SHIFT) | ((type) << PSTATE_TYPE_SHIFT))
+
+/* Make composite power state parameter till level 1 */
+#define qti_make_pwrstate_lvl1(lvl1_state, lvl0_state, type) \
+		(((lvl1_state) << QTI_LOCAL_PSTATE_WIDTH) | \
+		qti_make_pwrstate_lvl0(lvl0_state, type))
+
+/* Make composite power state parameter till level 2 */
+#define qti_make_pwrstate_lvl2(lvl2_state, lvl1_state, lvl0_state, type) \
+		(((lvl2_state) << (QTI_LOCAL_PSTATE_WIDTH * 2)) | \
+		qti_make_pwrstate_lvl1(lvl1_state, lvl0_state, type))
+
+/* Make composite power state parameter till level 3 */
+#define qti_make_pwrstate_lvl3(lvl3_state, lvl2_state, lvl1_state, lvl0_state, type) \
+		(((lvl3_state) << (QTI_LOCAL_PSTATE_WIDTH * 3)) | \
+		qti_make_pwrstate_lvl2(lvl2_state, lvl1_state, lvl0_state, type))
+
+/* QTI_CORE_PWRDN_EN_MASK happens to be same across all CPUs */
+#define QTI_CORE_PWRDN_EN_MASK		1
+
+/* cpu power control happens to be same across all CPUs */
+_DEFINE_SYSREG_WRITE_FUNC(cpu_pwrctrl_val, S3_0_C15_C2_7)
+_DEFINE_SYSREG_READ_FUNC(cpu_pwrctrl_val, S3_0_C15_C2_7)
+
+/*
+ * Table of valid composite power states, terminated by a 0 entry.
+ * Each entry packs one 4-bit local state per power level (LSB = level 0)
+ * plus the PSCI power-state type bit; see the qti_make_pwrstate_lvl*
+ * macros above. All entries are of POWERDOWN type.
+ */
+const unsigned int qti_pm_idle_states[] = {
+	qti_make_pwrstate_lvl0(QTI_LOCAL_STATE_OFF,
+			       PSTATE_TYPE_POWERDOWN),
+	qti_make_pwrstate_lvl0(QTI_LOCAL_STATE_DEEPOFF,
+			       PSTATE_TYPE_POWERDOWN),
+	qti_make_pwrstate_lvl1(QTI_LOCAL_STATE_DEEPOFF,
+			       QTI_LOCAL_STATE_DEEPOFF,
+			       PSTATE_TYPE_POWERDOWN),
+	qti_make_pwrstate_lvl2(QTI_LOCAL_STATE_OFF,
+			       QTI_LOCAL_STATE_DEEPOFF,
+			       QTI_LOCAL_STATE_DEEPOFF,
+			       PSTATE_TYPE_POWERDOWN),
+	qti_make_pwrstate_lvl3(QTI_LOCAL_STATE_OFF,
+			       QTI_LOCAL_STATE_DEEPOFF,
+			       QTI_LOCAL_STATE_DEEPOFF,
+			       QTI_LOCAL_STATE_DEEPOFF,
+			       PSTATE_TYPE_POWERDOWN),
+	/* Sentinel: marks the end of the table. */
+	0,
+};
+
+/*******************************************************************************
+ * QTI standard platform handler called to check the validity of the power
+ * state parameter. The power state parameter has to be a composite power
+ * state.
+ *
+ * Returns PSCI_E_SUCCESS and fills req_state->pwr_domain_state (one 4-bit
+ * local state per level, unpacked LSB-first from the state ID) when
+ * power_state matches an entry of qti_pm_idle_states[];
+ * PSCI_E_INVALID_PARAMS otherwise.
+ ******************************************************************************/
+int qti_validate_power_state(unsigned int power_state,
+			     psci_power_state_t *req_state)
+{
+	unsigned int state_id;
+	int i;
+
+	assert(req_state);
+
+	/*
+	 *  Currently we are using a linear search for finding the matching
+	 *  entry in the idle power state array. This can be made a binary
+	 *  search if the number of entries justify the additional complexity.
+	 */
+	for (i = 0; !!qti_pm_idle_states[i]; i++) {
+		if (power_state == qti_pm_idle_states[i])
+			break;
+	}
+
+	/* Return error if entry not found in the idle state array */
+	if (!qti_pm_idle_states[i])
+		return PSCI_E_INVALID_PARAMS;
+
+	i = 0;
+	state_id = psci_get_pstate_id(power_state);
+
+	/* Parse the State ID and populate the state info parameter */
+	while (state_id) {
+		req_state->pwr_domain_state[i++] = state_id &
+		    QTI_LOCAL_PSTATE_MASK;
+		state_id >>= QTI_LOCAL_PSTATE_WIDTH;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * PLATFORM FUNCTIONS
+ ******************************************************************************/
+
+/*
+ * Set the core power-down enable bit in the per-CPU power control system
+ * register (S3_0_C15_C2_7, accessors defined above) so that the core
+ * actually powers down on the next WFI. The isb() ensures the register
+ * write takes effect before the subsequent WFI.
+ */
+static void qti_set_cpupwrctlr_val(void)
+{
+	unsigned long val;
+
+	val = read_cpu_pwrctrl_val();
+	val |= QTI_CORE_PWRDN_EN_MASK;
+	write_cpu_pwrctrl_val(val);
+
+	isb();
+}
+
+/**
+ * PSCI pwr_domain_on hook: power up the core identified by 'mpidr'.
+ *
+ * Validates that the MPIDR maps to a known linear core index before
+ * delegating the actual power-on to the QTI secure library.
+ * Returns PSCI_E_INVALID_PARAMS for an unknown MPIDR, otherwise the
+ * qtiseclib result.
+ *
+ * Ideally we want a wrapper since this function is target specific.
+ * But to unblock teams.
+ */
+static int qti_cpu_power_on(u_register_t mpidr)
+{
+	int core_pos = plat_core_pos_by_mpidr(mpidr);
+
+	/* If not valid mpidr, return error */
+	if (core_pos < 0 || core_pos >= QTISECLIB_PLAT_CORE_COUNT) {
+		return PSCI_E_INVALID_PARAMS;
+	}
+
+	return qtiseclib_psci_node_power_on(mpidr);
+}
+
+/*
+ * Return true when the requested core-level (level 0) local state in
+ * 'target_state' powers the CPU down, i.e. it is either OFF or DEEPOFF.
+ */
+static bool is_cpu_off(const psci_power_state_t *target_state)
+{
+	const plat_local_state_t core_state =
+	    target_state->pwr_domain_state[QTI_PWR_LVL0];
+
+	return (core_state == QTI_LOCAL_STATE_OFF) ||
+	       (core_state == QTI_LOCAL_STATE_DEEPOFF);
+}
+
+/*
+ * PSCI pwr_domain_on_finish hook: runs on the target core after power-on.
+ * Notifies the QTI secure library, then re-enables the GIC CPU interface
+ * if the core-level state was a powerdown state.
+ */
+static void qti_cpu_power_on_finish(const psci_power_state_t *target_state)
+{
+	const uint8_t *pwr_states =
+	    (const uint8_t *)target_state->pwr_domain_state;
+	qtiseclib_psci_node_on_finish(pwr_states);
+
+	if (is_cpu_off(target_state)) {
+		plat_qti_gic_cpuif_enable();
+	}
+}
+
+/*
+ * PSCI cpu_standby hook. Intentionally empty: every entry in
+ * qti_pm_idle_states[] is of POWERDOWN type, so a pure standby
+ * (retention) state is never requested on this platform.
+ */
+static void qti_cpu_standby(plat_local_state_t cpu_state)
+{
+}
+
+/*
+ * PSCI pwr_domain_off hook. Notifies the QTI secure library of the
+ * composite target state, then — if the core itself is powering down —
+ * disables the GIC CPU interface and arms the core power-down control
+ * bit so the subsequent WFI actually removes power.
+ */
+static void qti_node_power_off(const psci_power_state_t *target_state)
+{
+	qtiseclib_psci_node_power_off((const uint8_t *)
+				      target_state->pwr_domain_state);
+	if (is_cpu_off(target_state)) {
+		plat_qti_gic_cpuif_disable();
+		qti_set_cpupwrctlr_val();
+	}
+}
+
+/*
+ * PSCI pwr_domain_suspend hook. Same core-side preparation as
+ * qti_node_power_off, but on the suspend path: inform the secure
+ * library, then disable the GIC CPU interface and arm core power-down
+ * when the core-level state is a powerdown state.
+ */
+static void qti_node_suspend(const psci_power_state_t *target_state)
+{
+	qtiseclib_psci_node_suspend((const uint8_t *)target_state->
+				    pwr_domain_state);
+	if (is_cpu_off(target_state)) {
+		plat_qti_gic_cpuif_disable();
+		qti_set_cpupwrctlr_val();
+	}
+}
+
+/*
+ * PSCI pwr_domain_suspend_finish hook: runs on resume. Notifies the
+ * QTI secure library, then re-enables the GIC CPU interface if the
+ * core had been powered down.
+ */
+static void qti_node_suspend_finish(const psci_power_state_t *target_state)
+{
+	const uint8_t *pwr_states =
+	    (const uint8_t *)target_state->pwr_domain_state;
+	qtiseclib_psci_node_suspend_finish(pwr_states);
+	if (is_cpu_off(target_state)) {
+		plat_qti_gic_cpuif_enable();
+	}
+}
+
+/*
+ * PSCI pwr_domain_pwr_down_wfi hook: the final step of a powerdown.
+ * Does not return.
+ */
+__dead2 void qti_domain_power_down_wfi(const psci_power_state_t *target_state)
+{
+
+	/* For now just do WFI - add any target specific handling if needed */
+	psci_power_down_wfi();
+	/* We should never reach here */
+}
+
+/* PSCI SYSTEM_OFF: delegated to the QTI secure library; never returns. */
+__dead2 void qti_system_off(void)
+{
+	qtiseclib_psci_system_off();
+}
+
+/* PSCI SYSTEM_RESET: delegated to the QTI secure library; never returns. */
+__dead2 void qti_system_reset(void)
+{
+	qtiseclib_psci_system_reset();
+}
+
+/*
+ * PSCI get_sys_suspend_power_state hook: fill 'req_state' with the
+ * deepest composite power state supported, used for SYSTEM_SUSPEND.
+ */
+void qti_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	int i = 0;
+	unsigned int state_id, power_state;
+	int size = ARRAY_SIZE(qti_pm_idle_states);
+
+	/*
+	 * Find deepest state.
+	 * The qti_pm_idle_states[] array has last element by default 0,
+	 * so the real deepest state is second last element of that array.
+	 */
+	power_state = qti_pm_idle_states[size - 2];
+	state_id = psci_get_pstate_id(power_state);
+
+	/* Parse the State ID and populate the state info parameter */
+	while (state_id) {
+		req_state->pwr_domain_state[i++] =
+		    state_id & QTI_LOCAL_PSTATE_MASK;
+		state_id >>= QTI_LOCAL_PSTATE_WIDTH;
+	}
+}
+
+/*
+ * Structure containing platform specific PSCI operations. Common
+ * PSCI layer will use this. Hooks left NULL (get_node_hw_state,
+ * translate_power_state_by_mpidr) are optional and not implemented
+ * on this platform.
+ */
+const plat_psci_ops_t plat_qti_psci_pm_ops = {
+	.pwr_domain_on = qti_cpu_power_on,
+	.pwr_domain_on_finish = qti_cpu_power_on_finish,
+	.cpu_standby = qti_cpu_standby,
+	.pwr_domain_off = qti_node_power_off,
+	.pwr_domain_suspend = qti_node_suspend,
+	.pwr_domain_suspend_finish = qti_node_suspend_finish,
+	.pwr_domain_pwr_down_wfi = qti_domain_power_down_wfi,
+	.system_off = qti_system_off,
+	.system_reset = qti_system_reset,
+	.get_node_hw_state = NULL,
+	.translate_power_state_by_mpidr = NULL,
+	.get_sys_suspend_power_state = qti_get_sys_suspend_power_state,
+	.validate_power_state = qti_validate_power_state,
+};
+
+/**
+ * The QTI Standard platform definition of platform porting API
+ * `plat_setup_psci_ops`.
+ *
+ * NOTE(review): the 'sec_entrypoint' argument is ignored here; the
+ * address of bl31_warm_entrypoint is handed to qtiseclib instead —
+ * confirm this is intentional for this platform's warm-boot flow.
+ * '*psci_ops' is only set on success.
+ */
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	int err;
+
+	err = qtiseclib_psci_init((uintptr_t)bl31_warm_entrypoint);
+	if (err == PSCI_E_SUCCESS) {
+		*psci_ops = &plat_qti_psci_pm_ops;
+	}
+
+	return err;
+}
diff --git a/plat/qti/common/src/qti_stack_protector.c b/plat/qti/common/src/qti_stack_protector.c
new file mode 100644
index 0000000..b2dbfb0
--- /dev/null
+++ b/plat/qti/common/src/qti_stack_protector.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <platform.h>
+#include <platform_def.h>
+#include <qtiseclib_interface.h>
+
+/*
+ * Return a random value to be used as the stack protector canary.
+ *
+ * The qtiseclib PRNG API does not return random == 0 on success, so a
+ * zero value indicates failure and is trapped by the assert.
+ * NOTE(review): with asserts compiled out (release builds) a PRNG
+ * failure would silently yield a zero canary — confirm qtiseclib
+ * cannot fail here in release configurations.
+ */
+u_register_t plat_get_stack_protector_canary(void)
+{
+	u_register_t random = 0x0;
+
+	/* get random data , the below API doesn't return random = 0 in success
+	 * case */
+	qtiseclib_prng_get_data((uint8_t *) &random, sizeof(random));
+	assert(0x0 != random);
+
+	return random;
+}
diff --git a/plat/qti/common/src/qti_syscall.c b/plat/qti/common/src/qti_syscall.c
new file mode 100644
index 0000000..27c4895
--- /dev/null
+++ b/plat/qti/common/src/qti_syscall.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <context.h>
+#include <lib/coreboot.h>
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <smccc_helpers.h>
+#include <tools_share/uuid.h>
+
+#include <qti_plat.h>
+#include <qti_secure_io_cfg.h>
+#include <qtiseclib_interface.h>
+/*
+ * SIP service - SMC function IDs for SiP Service queries
+ *
+ */
+#define	QTI_SIP_SVC_CALL_COUNT_ID			U(0x0200ff00)
+#define	QTI_SIP_SVC_UID_ID				U(0x0200ff01)
+/*							0x8200ff02 is reserved */
+#define	QTI_SIP_SVC_VERSION_ID				U(0x0200ff03)
+
+/*
+ * Syscalls that allow the Non-secure world to access peripheral/IO
+ * registers which are secure/protected but are not required to be secure.
+ */
+#define	QTI_SIP_SVC_SECURE_IO_READ_ID		U(0x02000501)
+#define	QTI_SIP_SVC_SECURE_IO_WRITE_ID		U(0x02000502)
+
+/*
+ * Syscall that assigns a list of intermediate PAs from a
+ * source Virtual Machine (VM) to a destination VM.
+ */
+#define	QTI_SIP_SVC_MEM_ASSIGN_ID		U(0x02000C16)
+
+#define	QTI_SIP_SVC_SECURE_IO_READ_PARAM_ID	U(0x1)
+#define	QTI_SIP_SVC_SECURE_IO_WRITE_PARAM_ID	U(0x2)
+#define	QTI_SIP_SVC_MEM_ASSIGN_PARAM_ID		U(0x1117)
+
+#define	QTI_SIP_SVC_CALL_COUNT			U(0x3)
+#define QTI_SIP_SVC_VERSION_MAJOR		U(0x0)
+#define	QTI_SIP_SVC_VERSION_MINOR		U(0x0)
+
+#define QTI_VM_LAST				U(44)
+#define SIZE4K					U(0x1000)
+#define QTI_VM_MAX_LIST_SIZE			U(0x20)
+
+#define	FUNCID_OEN_NUM_MASK	((FUNCID_OEN_MASK << FUNCID_OEN_SHIFT)\
+				|(FUNCID_NUM_MASK << FUNCID_NUM_SHIFT))
+
+enum {
+	QTI_SIP_SUCCESS = 0,
+	QTI_SIP_NOT_SUPPORTED = -1,
+	QTI_SIP_PREEMPTED = -2,
+	QTI_SIP_INVALID_PARAM = -3,
+};
+
+/* QTI SiP Service UUID */
+DEFINE_SVC_UUID2(qti_sip_svc_uid,
+		 0x43864748, 0x217f, 0x41ad, 0xaa, 0x5a,
+		 0xba, 0xe7, 0x0f, 0xa5, 0x52, 0xaf);
+
+/*
+ * Check 'addr' against the allow-list of secure IO registers that the
+ * Non-secure world may read/write through the SiP service.
+ */
+static bool qti_is_secure_io_access_allowed(u_register_t addr)
+{
+	size_t idx = 0U;
+	bool allowed = false;
+
+	while (idx < ARRAY_SIZE(qti_secure_io_allowed_regs)) {
+		if (qti_secure_io_allowed_regs[idx] == (uintptr_t) addr) {
+			allowed = true;
+			break;
+		}
+		idx++;
+	}
+
+	return allowed;
+}
+
+/*
+ * Validate the indirect arguments of the mem_assign SiP call.
+ *
+ * Rejects NULL lists, counts outside the supported limits, mappings that
+ * are not 4K-aligned or of zero size, ranges that wrap the address space
+ * or are not fully contained in coreboot-reported RAM, and VM ids that
+ * are not below QTI_VM_LAST. Returns true when every argument is valid.
+ */
+bool qti_mem_assign_validate_param(memprot_info_t *mem_info,
+				   u_register_t u_num_mappings,
+				   uint32_t *source_vm_list,
+				   u_register_t src_vm_list_cnt,
+				   memprot_dst_vm_perm_info_t *dest_vm_list,
+				   u_register_t dst_vm_list_cnt)
+{
+	/* Unsigned index: avoids signed/unsigned comparison against the
+	 * u_register_t counts. */
+	u_register_t i;
+
+	if (!source_vm_list || !dest_vm_list || !mem_info
+	    || (src_vm_list_cnt == 0)
+	    || (src_vm_list_cnt >= QTI_VM_LAST) || (dst_vm_list_cnt == 0)
+	    || (dst_vm_list_cnt >= QTI_VM_LAST) || (u_num_mappings == 0)
+	    || u_num_mappings > QTI_VM_MAX_LIST_SIZE) {
+		return false;
+	}
+	for (i = 0; i < u_num_mappings; i++) {
+		/* Each mapping must be 4K-aligned and of non-zero size. */
+		if ((mem_info[i].mem_addr & (SIZE4K - 1))
+		    || (mem_info[i].mem_size == 0)
+		    || (mem_info[i].mem_size & (SIZE4K - 1))) {
+			return false;
+		}
+
+		/* Reject ranges that wrap around the address space. */
+		if ((mem_info[i].mem_addr + mem_info[i].mem_size) <
+		    mem_info[i].mem_addr) {
+			return false;
+		}
+		/*
+		 * Both ends of the range must be coreboot RAM. Probe the
+		 * last byte of the range, not one past the end, so a
+		 * region ending exactly at a RAM boundary is accepted.
+		 */
+		if (coreboot_get_memory_type(mem_info[i].mem_addr) !=
+		    CB_MEM_RAM) {
+			return false;
+		}
+
+		if (coreboot_get_memory_type
+		    (mem_info[i].mem_addr + mem_info[i].mem_size - 1U) !=
+		    CB_MEM_RAM) {
+			return false;
+		}
+
+	}
+	/* Every source and destination VM id must be a known VM. */
+	for (i = 0; i < src_vm_list_cnt; i++) {
+		if (source_vm_list[i] >= QTI_VM_LAST) {
+			return false;
+		}
+	}
+	for (i = 0; i < dst_vm_list_cnt; i++) {
+		if (dest_vm_list[i].dst_vm >= QTI_VM_LAST) {
+			return false;
+		}
+	}
+	return true;
+}
+
+/*
+ * Handle the QTI_SIP_SVC_MEM_ASSIGN_ID SMC: reassign a list of IPA
+ * ranges from source VMs to destination VMs.
+ *
+ * x1 is the parameter-id token; x2/x3, x4/x5, x6/x7 are (pointer, size)
+ * pairs for the mapping list, source VM list and destination VM list.
+ * x5-x7 are fetched indirectly from a Non-secure buffer pointed to by
+ * the caller's x5 register. Returns via SMC_RET2 with QTI_SIP_SUCCESS
+ * or QTI_SIP_INVALID_PARAM plus the library return code.
+ *
+ * NOTE(review): the NS buffers are validated in place and only copied
+ * to secure stack arrays afterwards; the Non-secure world could modify
+ * the buffers between validation and copy (TOCTOU). Confirm that
+ * qtiseclib re-validates, or validate the local copies instead.
+ */
+static uintptr_t qti_sip_mem_assign(void *handle, uint32_t smc_cc,
+				    u_register_t x1,
+				    u_register_t x2,
+				    u_register_t x3, u_register_t x4)
+{
+	uintptr_t dyn_map_start = 0, dyn_map_end = 0;
+	size_t dyn_map_size = 0;
+	u_register_t x6, x7;
+	int ret = QTI_SIP_NOT_SUPPORTED;
+	u_register_t x5 = read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X5);
+
+	if (smc_cc == SMC_32) {
+		x5 = (uint32_t) x5;
+	}
+	/* Validate input arg count & retrieve arg3-6 from NS Buffer.
+	 * (dyn_map_start is still 0 here, so the exit path skips unmap.) */
+	if ((x1 != QTI_SIP_SVC_MEM_ASSIGN_PARAM_ID) || (x5 == 0x0)) {
+		goto unmap_return;
+	}
+
+	/* Map NS Buffer. */
+	dyn_map_start = x5;
+	dyn_map_size =
+		(smc_cc ==
+		 SMC_32) ? (sizeof(uint32_t) * 4) : (sizeof(uint64_t) * 4);
+	if (qti_mmap_add_dynamic_region(dyn_map_start, dyn_map_size,
+				(MT_NS | MT_RO_DATA)) != 0) {
+		goto unmap_return;
+	}
+	/* Retrieve indirect args (x5, x6, x7) from the NS buffer; width
+	 * depends on the SMC calling convention. */
+	if (smc_cc == SMC_32) {
+		x6 = *((uint32_t *) x5 + 1);
+		x7 = *((uint32_t *) x5 + 2);
+		x5 = *(uint32_t *) x5;
+	} else {
+		x6 = *((uint64_t *) x5 + 1);
+		x7 = *((uint64_t *) x5 + 2);
+		x5 = *(uint64_t *) x5;
+	}
+	/* Un-Map NS Buffer. */
+	if (qti_mmap_remove_dynamic_region(dyn_map_start, dyn_map_size) != 0) {
+		goto unmap_return;
+	}
+
+	/*
+	 * Map NS Buffers.
+	 * arg0,2,4 points to buffers & arg1,3,5 hold sizes.
+	 * MAP api's fail to map if it's already mapped. Let's
+	 * find lowest start & highest end address, then map once.
+	 */
+	dyn_map_start = MIN(x2, x4);
+	dyn_map_start = MIN(dyn_map_start, x6);
+	dyn_map_end = MAX((x2 + x3), (x4 + x5));
+	dyn_map_end = MAX(dyn_map_end, (x6 + x7));
+	dyn_map_size = dyn_map_end - dyn_map_start;
+
+	if (qti_mmap_add_dynamic_region(dyn_map_start, dyn_map_size,
+					(MT_NS | MT_RO_DATA)) != 0) {
+		goto unmap_return;
+	}
+	/* Derive element counts from the byte sizes supplied by NS. */
+	memprot_info_t *mem_info_p = (memprot_info_t *) x2;
+	uint32_t u_num_mappings = x3 / sizeof(memprot_info_t);
+	uint32_t *source_vm_list_p = (uint32_t *) x4;
+	uint32_t src_vm_list_cnt = x5 / sizeof(uint32_t);
+	memprot_dst_vm_perm_info_t *dest_vm_list_p =
+		(memprot_dst_vm_perm_info_t *) x6;
+	uint32_t dst_vm_list_cnt =
+		x7 / sizeof(memprot_dst_vm_perm_info_t);
+	if (qti_mem_assign_validate_param(mem_info_p, u_num_mappings,
+				source_vm_list_p, src_vm_list_cnt,
+				dest_vm_list_p,
+				dst_vm_list_cnt) != true) {
+		goto unmap_return;
+	}
+
+	/* Copy the NS data into secure stack buffers before use, so the
+	 * library never dereferences NS memory. Bounds were validated
+	 * above (u_num_mappings <= QTI_VM_MAX_LIST_SIZE, counts < QTI_VM_LAST). */
+	memprot_info_t mem_info[QTI_VM_MAX_LIST_SIZE];
+	/* Populating the arguments */
+	for (int i = 0; i < u_num_mappings; i++) {
+		mem_info[i].mem_addr = mem_info_p[i].mem_addr;
+		mem_info[i].mem_size = mem_info_p[i].mem_size;
+	}
+
+	memprot_dst_vm_perm_info_t dest_vm_list[QTI_VM_LAST];
+
+	for (int i = 0; i < dst_vm_list_cnt; i++) {
+		dest_vm_list[i].dst_vm = dest_vm_list_p[i].dst_vm;
+		dest_vm_list[i].dst_vm_perm =
+			dest_vm_list_p[i].dst_vm_perm;
+		dest_vm_list[i].ctx = dest_vm_list_p[i].ctx;
+		dest_vm_list[i].ctx_size = dest_vm_list_p[i].ctx_size;
+	}
+
+	uint32_t source_vm_list[QTI_VM_LAST];
+
+	for (int i = 0; i < src_vm_list_cnt; i++) {
+		source_vm_list[i] = source_vm_list_p[i];
+	}
+	/* Un-Map NS Buffers. */
+	if (qti_mmap_remove_dynamic_region(dyn_map_start,
+				dyn_map_size) != 0) {
+		goto unmap_return;
+	}
+	/* Invoke API lib api. */
+	ret = qtiseclib_mem_assign(mem_info, u_num_mappings,
+			source_vm_list, src_vm_list_cnt,
+			dest_vm_list, dst_vm_list_cnt);
+
+	if (ret == 0) {
+		SMC_RET2(handle, QTI_SIP_SUCCESS, ret);
+	}
+unmap_return:
+	/* Un-Map NS Buffers if mapped */
+	if (dyn_map_start && dyn_map_size) {
+		qti_mmap_remove_dynamic_region(dyn_map_start, dyn_map_size);
+	}
+
+	SMC_RET2(handle, QTI_SIP_INVALID_PARAM, ret);
+}
+
+/*
+ * This function handles QTI specific syscalls. Currently only SiP calls
+ * are present. Both FAST & YIELD type calls land here.
+ *
+ * The OEN+function-number portion of smc_fid selects the service; for
+ * SMC32 calls the arguments are truncated to 32 bits first. Unknown
+ * function ids return QTI_SIP_NOT_SUPPORTED.
+ */
+static uintptr_t qti_sip_handler(uint32_t smc_fid,
+				 u_register_t x1,
+				 u_register_t x2,
+				 u_register_t x3,
+				 u_register_t x4,
+				 void *cookie, void *handle, u_register_t flags)
+{
+	/* Mask off everything but the OEN and function number, so the
+	 * fast/yield bit does not affect dispatch. */
+	uint32_t l_smc_fid = smc_fid & FUNCID_OEN_NUM_MASK;
+
+	if (GET_SMC_CC(smc_fid) == SMC_32) {
+		x1 = (uint32_t) x1;
+		x2 = (uint32_t) x2;
+		x3 = (uint32_t) x3;
+		x4 = (uint32_t) x4;
+	}
+
+	switch (l_smc_fid) {
+	case QTI_SIP_SVC_CALL_COUNT_ID:
+		{
+			SMC_RET1(handle, QTI_SIP_SVC_CALL_COUNT);
+			break;
+		}
+	case QTI_SIP_SVC_UID_ID:
+		{
+			/* Return UID to the caller */
+			SMC_UUID_RET(handle, qti_sip_svc_uid);
+			break;
+		}
+	case QTI_SIP_SVC_VERSION_ID:
+		{
+			/* Return the version of current implementation */
+			SMC_RET2(handle, QTI_SIP_SVC_VERSION_MAJOR,
+				 QTI_SIP_SVC_VERSION_MINOR);
+			break;
+		}
+	case QTI_SIP_SVC_SECURE_IO_READ_ID:
+		{
+			/* Read one 32-bit register, only if on the allow-list. */
+			if ((x1 == QTI_SIP_SVC_SECURE_IO_READ_PARAM_ID) &&
+			    qti_is_secure_io_access_allowed(x2)) {
+				SMC_RET2(handle, QTI_SIP_SUCCESS,
+					 *((volatile uint32_t *)x2));
+			}
+			SMC_RET1(handle, QTI_SIP_INVALID_PARAM);
+			break;
+		}
+	case QTI_SIP_SVC_SECURE_IO_WRITE_ID:
+		{
+			/* Write one 32-bit register, only if on the allow-list. */
+			if ((x1 == QTI_SIP_SVC_SECURE_IO_WRITE_PARAM_ID) &&
+			    qti_is_secure_io_access_allowed(x2)) {
+				*((volatile uint32_t *)x2) = x3;
+				SMC_RET1(handle, QTI_SIP_SUCCESS);
+			}
+			SMC_RET1(handle, QTI_SIP_INVALID_PARAM);
+			break;
+		}
+	case QTI_SIP_SVC_MEM_ASSIGN_ID:
+		{
+			return qti_sip_mem_assign(handle, GET_SMC_CC(smc_fid),
+						  x1, x2, x3, x4);
+			break;
+		}
+	default:
+		{
+			SMC_RET1(handle, QTI_SIP_NOT_SUPPORTED);
+		}
+	}
+	return (uintptr_t) handle;
+}
+
+/* Define a runtime service descriptor for both fast & yield SiP calls */
+DECLARE_RT_SVC(qti_sip_fast_svc, OEN_SIP_START,
+	       OEN_SIP_END, SMC_TYPE_FAST, NULL, qti_sip_handler);
+
+DECLARE_RT_SVC(qti_sip_yield_svc, OEN_SIP_START,
+	       OEN_SIP_END, SMC_TYPE_YIELD, NULL, qti_sip_handler);
diff --git a/plat/qti/common/src/qti_topology.c b/plat/qti/common/src/qti_topology.c
new file mode 100644
index 0000000..bf2e3f3
--- /dev/null
+++ b/plat/qti/common/src/qti_topology.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+
+#include <platform_def.h>
+#include <qti_plat.h>
+
+/* The QTI power domain tree descriptor: number of children at each
+ * level, top-down (PDC -> RSC -> cluster -> cores), consumed by the
+ * generic PSCI layer via plat_get_power_domain_tree_desc(). */
+const unsigned char qti_power_domain_tree_desc[] = {
+	/* One domain to represent PDC */
+	PLAT_PDC_COUNT,
+	/* One domain to represent RSC */
+	PLAT_RSC_COUNT,
+	/* There is one top-level FCM cluster */
+	PLAT_CLUSTER_COUNT,
+	/* No. of cores in the FCM cluster */
+	PLAT_CLUSTER0_CORE_COUNT
+};
+
+/*******************************************************************************
+ * This function returns the QTI platform topology tree information
+ * (qti_power_domain_tree_desc above) to the generic PSCI layer.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return qti_power_domain_tree_desc;
+}
+
+/** Function: plat_core_pos_by_mpidr
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ *
+ * NOTE(review): only the upper bound is checked here; this relies on
+ * plat_qti_core_pos_by_mpidr (assembly helper) never producing a
+ * negative index — confirm its declared return type.
+ */
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	int core_linear_index = plat_qti_core_pos_by_mpidr(mpidr);
+
+	if (core_linear_index < PLATFORM_CORE_COUNT) {
+		return core_linear_index;
+	} else {
+		return -1;
+	}
+}