nxp: add psci platform functions used by lib/psci

Signed-off-by: rocket <rod.dorris@nxp.com>
Signed-off-by: Pankaj Gupta <pankaj.gupta@nxp.com>
Change-Id: I9853263ed38fb2a9f04b9dc7d768942e32074719
diff --git a/plat/nxp/common/psci/aarch64/psci_utils.S b/plat/nxp/common/psci/aarch64/psci_utils.S
new file mode 100644
index 0000000..ea2abbf
--- /dev/null
+++ b/plat/nxp/common/psci/aarch64/psci_utils.S
@@ -0,0 +1,1155 @@
+
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+#include <lib/psci/psci.h>
+
+#include <bl31_data.h>
+#include <plat_psci.h>
+
+
+#define RESET_RETRY_CNT		800
+#define PSCI_ABORT_CNT		100
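+
+/* these counts bound the polling loops below: PSCI_ABORT_CNT limits
+ * the abort-acknowledge poll in _psci_cpu_on; RESET_RETRY_CNT is not
+ * referenced in this file and is presumably consumed by soc-specific
+ * code
+ */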
+
+#if (SOC_CORE_RELEASE)
+
+.global _psci_cpu_on
+
+/*
+ * int _psci_cpu_on(u_register_t core_mask)
+ * x0   = target cpu core mask
+ *
+ * Called from C, so save the non-volatile registers.
+ * Save them as register pairs to maintain the required
+ * 16-byte stack alignment.
+ */
+
+func _psci_cpu_on
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x6, x0
+
+	/* x0   = core mask (lsb)
+	 * x6   = core mask (lsb)
+	 */
+
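+	/* note: the numeric comments after bl instructions, e.g.
+	 * "0-2", list the x-registers the callee is assumed to
+	 * clobber
+	 */
+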
+	/* check if core disabled */
+	bl   _soc_ck_disabled		/* 0-2 */
+	cbnz w0, psci_disabled
+
+	/* check the core data area to see if the core can be
+	 * turned on - read the core state
+	 */
+	mov  x0, x6
+	bl   _getCoreState		/* 0-5 */
+	mov  x9, x0
+
+	/* x6   = core mask (lsb)
+	 * x9   = core state (from data area)
+	 */
+
+	cmp  x9, #CORE_DISABLED
+	mov  x0, #PSCI_E_DISABLED
+	b.eq cpu_on_done
+
+	cmp  x9, #CORE_PENDING
+	mov  x0, #PSCI_E_ON_PENDING
+	b.eq cpu_on_done
+
+	cmp  x9, #CORE_RELEASED
+	mov  x0, #PSCI_E_ALREADY_ON
+	b.eq cpu_on_done
+
+8:
+	/* x6   = core mask (lsb)
+	 * x9   = core state (from data area)
+	 */
+
+	cmp  x9, #CORE_WFE
+	b.eq core_in_wfe
+	cmp  x9, #CORE_IN_RESET
+	b.eq core_in_reset
+	cmp  x9, #CORE_OFF
+	b.eq core_is_off
+	cmp  x9, #CORE_OFF_PENDING
+
+	/* all other states have been handled, so the core is in
+	 * CORE_OFF_PENDING - request an abort of the power-down
+	 */
+	mov  x0, x6
+	mov  x1, #ABORT_FLAG_DATA
+	mov  x2, #CORE_ABORT_OP
+	bl   _setCoreData		/* 0-3, [13-15] */
+
+	ldr  x3, =PSCI_ABORT_CNT
+7:
+	/* watch for abort to take effect */
+	mov  x0, x6
+	bl   _getCoreState		/* 0-5 */
+	cmp  x0, #CORE_OFF
+	b.eq core_is_off
+	cmp  x0, #CORE_PENDING
+	mov  x0, #PSCI_E_SUCCESS
+	b.eq cpu_on_done
+
+	/* loop until finished */
+	sub  x3, x3, #1
+	cbnz x3, 7b
+
+	/* if we didn't see either CORE_OFF or CORE_PENDING, then this
+	 * core is in CORE_OFF_PENDING - exit with success, as the core will
+	 * respond to the abort request
+	 */
+	mov  x0, #PSCI_E_SUCCESS
+	b    cpu_on_done
+
+/* this is where we start up a core out of reset */
+core_in_reset:
+	/* see if the soc-specific module supports this op */
+	ldr  x7, =SOC_CORE_RELEASE
+	cbnz x7, 3f
+
+	mov  x0, #PSCI_E_NOT_SUPPORTED
+	b    cpu_on_done
+
+	/* x6   = core mask (lsb) */
+3:
+	/* set core state in data area */
+	mov  x0, x6
+	mov  x1, #CORE_PENDING
+	bl   _setCoreState			/* 0-3, [13-15] */
+
+	/* release the core from reset */
+	mov   x0, x6
+	bl    _soc_core_release 		/* 0-3 */
+	mov   x0, #PSCI_E_SUCCESS
+	b     cpu_on_done
+
+	/* Start up the core that has been powered-down via CPU_OFF
+	 */
+core_is_off:
+	/* see if the soc-specific module supports this op
+	 */
+	ldr  x7, =SOC_CORE_RESTART
+	cbnz x7, 2f
+
+	mov  x0, #PSCI_E_NOT_SUPPORTED
+	b    cpu_on_done
+
+	/* x6   = core mask (lsb) */
+2:
+	/* set core state in data area */
+	mov  x0, x6
+	mov  x1, #CORE_WAKEUP
+	bl   _setCoreState			/* 0-3, [13-15] */
+
+	/* put the core back into service */
+	mov  x0, x6
+#if (SOC_CORE_RESTART)
+	bl   _soc_core_restart			/* 0-5 */
+#endif
+	mov  x0, #PSCI_E_SUCCESS
+	b    cpu_on_done
+
+/* this is where we release a core that is being held in wfe */
+core_in_wfe:
+	/* x6   = core mask (lsb) */
+
+	/* set core state in data area */
+	mov  x0, x6
+	mov  x1, #CORE_PENDING
+	bl   _setCoreState			/* 0-3, [13-15] */
+	dsb  sy
+	isb
+
+	/* put the core back into service - it is waiting in wfe;
+	 * sev is issued twice, presumably to guard against a
+	 * missed event
+	 */
+	sev
+	sev
+	isb
+	mov  x0, #PSCI_E_SUCCESS
+
+cpu_on_done:
+	/* restore the non-volatile registers */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_cpu_on
+
+#endif
+
+
+#if (SOC_CORE_OFF)
+
+.global _psci_cpu_prep_off
+.global _psci_cpu_off_wfi
+
+/*
+ * void _psci_cpu_prep_off(u_register_t core_mask)
+ * this function performs the SoC-specific programming prior
+ * to shutting the core down
+ * x0 = core_mask
+ *
+ * Called from C, so save the non-volatile registers.
+ * Save them as register pairs to maintain the required
+ * 16-byte stack alignment.
+ */
+
+func _psci_cpu_prep_off
+
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x10, x0			/* x10 = core_mask */
+
+	/* the core does not return from cpu_off, so the
+	 * non-volatile registers do not strictly need to be
+	 * preserved across this call
+	 */
+
+	/* mask interrupts by setting DAIF[9:6] (D, A, I, F) to 0b1111 */
+	msr DAIFSet, #0xF
+
+	/* read cpuectlr and save current value */
+	mrs   x4, CORTEX_A72_ECTLR_EL1
+	mov   x1, #CPUECTLR_DATA
+	mov   x2, x4
+	mov   x0, x10
+	bl    _setCoreData
+
+	/* remove the core from coherency */
+	bic   x4, x4, #CPUECTLR_SMPEN_MASK
+	msr   CORTEX_A72_ECTLR_EL1, x4
+
+	/* save scr_el3 */
+	mov  x0, x10
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* x4 = scr_el3 */
+
+	/* secure SGI (FIQ) taken to EL3, set SCR_EL3[FIQ] */
+	orr   x4, x4, #SCR_FIQ_MASK
+	msr   scr_el3, x4
+
+	/* x10 = core_mask */
+
+	/* prep the core for shutdown */
+	mov  x0, x10
+	bl   _soc_core_prep_off
+
+	/* restore the non-volatile registers */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_cpu_prep_off
+
+/*
+ * void _psci_cpu_off_wfi(u_register_t core_mask, u_register_t resume_addr)
+ *   - this function shuts down the core
+ *   - it does not return to the caller; on wakeup, execution
+ *     resumes at resume_addr
+ */
+
+func _psci_cpu_off_wfi
+	/* save the wakeup address */
+	mov  x29, x1
+
+	/* x0 = core_mask */
+
+	/* shutdown the core */
+	bl   _soc_core_entr_off
+
+	/* branch to resume execution */
+	br   x29
+endfunc _psci_cpu_off_wfi
+
+#endif
+
+
+#if (SOC_CORE_RESTART)
+
+.global _psci_wakeup
+
+/*
+ * void _psci_wakeup(u_register_t core_mask)
+ * this function performs the SoC-specific programming
+ * after a core wakes up from OFF
+ * x0 = core mask
+ *
+ * Called from C, so save the non-volatile registers.
+ * Save them as register pairs to maintain the required
+ * 16-byte stack alignment.
+ */
+
+func _psci_wakeup
+
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x4, x0			/* x4 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x4
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x4 = core mask */
+
+	/* restore CPUECTLR */
+	mov   x0, x4
+	mov   x1, #CPUECTLR_DATA
+	bl    _getCoreData
+	orr   x0, x0, #CPUECTLR_SMPEN_MASK
+	msr   CORTEX_A72_ECTLR_EL1, x0
+
+	/* x4 = core mask */
+
+	/* start the core back up */
+	mov   x0, x4
+	bl   _soc_core_exit_off
+
+	/* restore the non-volatile registers */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_wakeup
+
+#endif
+
+
+#if (SOC_SYSTEM_RESET)
+
+.global _psci_system_reset
+
+func _psci_system_reset
+
+	/* system reset is mandatory, and its implementation is
+	 * soc-specific
+	 * Note: this call never returns
+	 */
+	bl   _soc_sys_reset
+endfunc _psci_system_reset
+
+#endif
+
+
+#if (SOC_SYSTEM_OFF)
+
+.global _psci_system_off
+
+func _psci_system_off
+
+	/* system off is mandatory, and its implementation is
+	 * soc-specific
+	 * Note: this call never returns
+	 */
+	b    _soc_sys_off
+endfunc _psci_system_off
+
+#endif
+
+
+#if (SOC_CORE_STANDBY)
+
+.global _psci_core_entr_stdby
+.global _psci_core_prep_stdby
+.global _psci_core_exit_stdby
+
+/*
+ * void _psci_core_entr_stdby(u_register_t core_mask) - this
+ * is the fast-path for simple core standby
+ */
+
+func _psci_core_entr_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0		/* x5 = core mask */
+
+	/* save scr_el3 */
+	mov  x0, x5
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* x4 = SCR_EL3
+	 * x5 = core mask
+	 */
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* x5 = core mask */
+
+	/* put the core into standby */
+	mov  x0, x5
+	bl   _soc_core_entr_stdby
+
+	/* restore scr_el3 */
+	mov  x0, x5
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_core_entr_stdby
+
+/*
+ * void _psci_core_prep_stdby(u_register_t core_mask) - this
+ * sets up the core to enter the standby state through the normal path
+ */
+
+func _psci_core_prep_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0
+
+	/* x5 = core mask */
+
+	/* save scr_el3 */
+	mov  x0, x5
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* x5 = core mask */
+
+	/* call for any SoC-specific programming */
+	mov  x0, x5
+	bl   _soc_core_prep_stdby
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_core_prep_stdby
+
+/*
+ * void _psci_core_exit_stdby(u_register_t core_mask) - this
+ * exits the core from the standby state through the normal path
+ */
+
+func _psci_core_exit_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0
+
+	/* x5 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x5
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x5 = core mask */
+
+	/* perform any SoC-specific programming after standby state */
+	mov  x0, x5
+	bl   _soc_core_exit_stdby
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_core_exit_stdby
+
+#endif
+
+
+#if (SOC_CORE_PWR_DWN)
+
+.global _psci_core_prep_pwrdn
+.global _psci_cpu_pwrdn_wfi
+.global _psci_core_exit_pwrdn
+
+/*
+ * void _psci_core_prep_pwrdn(u_register_t core_mask)
+ * this function prepares the core for power-down
+ * x0 = core mask
+ *
+ * Called from C, so save the non-volatile registers.
+ * Save them as register pairs to maintain the required
+ * 16-byte stack alignment.
+ */
+
+func _psci_core_prep_pwrdn
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x6, x0
+
+	/* x6 = core mask */
+
+	/* mask interrupts by setting DAIF[9:6] (D, A, I, F) to 0b1111 */
+	msr DAIFSet, #0xF
+
+	/* save scr_el3 */
+	mov  x0, x6
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* save cpuectlr */
+	mov  x0, x6
+	mov  x1, #CPUECTLR_DATA
+	mrs  x2, CORTEX_A72_ECTLR_EL1
+	bl   _setCoreData
+
+	/* x6 = core mask */
+
+	/* SoC-specific programming for power-down */
+	mov  x0, x6
+	bl  _soc_core_prep_pwrdn
+
+	/* restore the non-volatile registers */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_core_prep_pwrdn
+
+/*
+ * void _psci_cpu_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
+ * this function powers down the core; on wakeup, execution
+ * resumes at resume_addr
+ */
+
+func _psci_cpu_pwrdn_wfi
+	/* save the wakeup address */
+	mov  x29, x1
+
+	/* x0 = core mask */
+
+	/* shutdown the core */
+	bl   _soc_core_entr_pwrdn
+
+	/* branch to resume execution */
+	br   x29
+endfunc _psci_cpu_pwrdn_wfi
+
+/*
+ * void _psci_core_exit_pwrdn(u_register_t core_mask)
+ * this function cleans up after a core power-down
+ * x0 = core mask
+ *
+ * Called from C, so save the non-volatile registers.
+ * Save them as register pairs to maintain the required
+ * 16-byte stack alignment.
+ */
+
+func _psci_core_exit_pwrdn
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x5, x0			/* x5 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x5
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x5 = core mask */
+
+	/* restore cpuectlr */
+	mov  x0, x5
+	mov  x1, #CPUECTLR_DATA
+	bl   _getCoreData
+	/* make sure smp is set */
+	orr  x0, x0, #CPUECTLR_SMPEN_MASK
+	msr  CORTEX_A72_ECTLR_EL1, x0
+
+	/* x5 = core mask */
+
+	/* SoC-specific cleanup */
+	mov  x0, x5
+	bl   _soc_core_exit_pwrdn
+
+	/* restore the non-volatile registers */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_core_exit_pwrdn
+
+#endif
+
+#if (SOC_CLUSTER_STANDBY)
+
+.global _psci_clstr_prep_stdby
+.global _psci_clstr_exit_stdby
+
+/*
+ * void _psci_clstr_prep_stdby(u_register_t core_mask) - this
+ * sets up the cluster to enter the standby state through the normal path
+ */
+
+func _psci_clstr_prep_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0
+
+	/* x5 = core mask */
+
+	/* save scr_el3 */
+	mov  x0, x5
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* x5 = core mask */
+
+	/* call for any SoC-specific programming */
+	mov  x0, x5
+	bl   _soc_clstr_prep_stdby
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_clstr_prep_stdby
+
+/*
+ * void _psci_clstr_exit_stdby(u_register_t core_mask) - this
+ * exits the cluster from the standby state through the normal path
+ */
+
+func _psci_clstr_exit_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0			/* x5 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x5
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x5 = core mask */
+
+	/* perform any SoC-specific programming after standby state */
+	mov  x0, x5
+	bl   _soc_clstr_exit_stdby
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_clstr_exit_stdby
+
+#endif
+
+#if (SOC_CLUSTER_PWR_DWN)
+
+.global _psci_clstr_prep_pwrdn
+.global _psci_clstr_exit_pwrdn
+
+/*
+ * void _psci_clstr_prep_pwrdn(u_register_t core_mask)
+ * this function prepares the cluster+core for power-down
+ * x0 = core mask
+ *
+ * Called from C, so save the non-volatile registers.
+ * Save them as register pairs to maintain the required
+ * 16-byte stack alignment.
+ */
+
+func _psci_clstr_prep_pwrdn
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x6, x0			/* x6 = core mask */
+
+	/* mask interrupts by setting DAIF[9:6] (D, A, I, F) to 0b1111 */
+	msr DAIFSet, #0xF
+
+	/* save scr_el3 */
+	mov  x0, x6
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* save cpuectlr */
+	mov  x0, x6
+	mov  x1, #CPUECTLR_DATA
+	mrs  x2, CORTEX_A72_ECTLR_EL1
+	mov  x4, x2
+	bl   _setCoreData
+
+	/* remove core from coherency */
+	bic   x4, x4, #CPUECTLR_SMPEN_MASK
+	msr   CORTEX_A72_ECTLR_EL1, x4
+
+	/* x6 = core mask */
+
+	/* SoC-specific programming for power-down */
+	mov  x0, x6
+	bl  _soc_clstr_prep_pwrdn
+
+	/* restore the non-volatile registers */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_clstr_prep_pwrdn
+
+/*
+ * void _psci_clstr_exit_pwrdn(u_register_t core_mask)
+ * this function cleans up after a cluster power-down
+ * x0 = core mask
+ *
+ * Called from C, so save the non-volatile registers.
+ * Save them as register pairs to maintain the required
+ * 16-byte stack alignment.
+ */
+
+func _psci_clstr_exit_pwrdn
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x4, x0			/* x4 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x4
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x4 = core mask */
+
+	/* restore cpuectlr */
+	mov  x0, x4
+	mov  x1, #CPUECTLR_DATA
+	bl   _getCoreData
+	/* make sure smp is set */
+	orr  x0, x0, #CPUECTLR_SMPEN_MASK
+	msr  CORTEX_A72_ECTLR_EL1, x0
+
+	/* x4 = core mask */
+
+	/* SoC-specific cleanup */
+	mov  x0, x4
+	bl   _soc_clstr_exit_pwrdn
+
+	/* restore the non-volatile registers */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_clstr_exit_pwrdn
+
+#endif
+
+#if (SOC_SYSTEM_STANDBY)
+
+.global _psci_sys_prep_stdby
+.global _psci_sys_exit_stdby
+
+/*
+ * void _psci_sys_prep_stdby(u_register_t core_mask) - this
+ * sets up the system to enter the standby state through the normal path
+ */
+
+func _psci_sys_prep_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0			/* x5 = core mask */
+
+	/* save scr_el3 */
+	mov  x0, x5
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* x5 = core mask */
+
+	/* call for any SoC-specific programming */
+	mov  x0, x5
+	bl   _soc_sys_prep_stdby
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_sys_prep_stdby
+
+/*
+ * void _psci_sys_exit_stdby(u_register_t core_mask) - this
+ * exits the system from the standby state through the normal path
+ */
+
+func _psci_sys_exit_stdby
+	stp  x4,  x5, [sp, #-16]!
+	stp  x6, x30, [sp, #-16]!
+
+	mov  x5, x0
+
+	/* x5 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x5
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x5 = core mask */
+
+	/* perform any SoC-specific programming after standby state */
+	mov  x0, x5
+	bl   _soc_sys_exit_stdby
+
+	ldp  x6,  x30, [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	isb
+	ret
+endfunc _psci_sys_exit_stdby
+
+#endif
+
+#if (SOC_SYSTEM_PWR_DWN)
+
+.global _psci_sys_prep_pwrdn
+.global _psci_sys_pwrdn_wfi
+.global _psci_sys_exit_pwrdn
+
+/*
+ * void _psci_sys_prep_pwrdn(u_register_t core_mask)
+ * this function prepares the system+core for power-down
+ * x0 = core mask
+ *
+ * Called from C, so save the non-volatile registers.
+ * Save them as register pairs to maintain the required
+ * 16-byte stack alignment.
+ */
+
+func _psci_sys_prep_pwrdn
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x6, x0			/* x6 = core mask */
+
+	/* mask interrupts by setting DAIF[9:6] (D, A, I, F) to 0b1111 */
+	msr DAIFSet, #0xF
+
+	/* save scr_el3 */
+	mov  x0, x6
+	mrs  x4, SCR_EL3
+	mov  x2, x4
+	mov  x1, #SCR_EL3_DATA
+	bl    _setCoreData
+
+	/* allow interrupts @ EL3 */
+	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
+	msr  SCR_EL3, x4
+
+	/* save cpuectlr */
+	mov  x0, x6
+	mov  x1, #CPUECTLR_DATA
+	mrs  x2, CORTEX_A72_ECTLR_EL1
+	mov  x4, x2
+	bl   _setCoreData
+
+	/* remove core from coherency */
+	bic   x4, x4, #CPUECTLR_SMPEN_MASK
+	msr   CORTEX_A72_ECTLR_EL1, x4
+
+	/* x6 = core mask */
+
+	/* SoC-specific programming for power-down */
+	mov  x0, x6
+	bl  _soc_sys_prep_pwrdn
+
+	/* restore the non-volatile registers */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_sys_prep_pwrdn
+
+
+/*
+ * void _psci_sys_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
+ * this function powers down the system; on wakeup, execution
+ * resumes at resume_addr
+ */
+
+func _psci_sys_pwrdn_wfi
+	/* save the wakeup address */
+	mov  x29, x1
+
+	/* x0 = core mask */
+
+	/* shutdown the system */
+	bl   _soc_sys_pwrdn_wfi
+
+	/* branch to resume execution */
+	br   x29
+endfunc _psci_sys_pwrdn_wfi
+
+/*
+ * void _psci_sys_exit_pwrdn(u_register_t core_mask)
+ * this function cleans up after a system power-down
+ * x0 = core mask
+ *
+ * Called from C, so save the non-volatile registers.
+ * Save them as register pairs to maintain the required
+ * 16-byte stack alignment.
+ */
+
+func _psci_sys_exit_pwrdn
+
+	stp  x4,  x5,  [sp, #-16]!
+	stp  x6,  x7,  [sp, #-16]!
+	stp  x8,  x9,  [sp, #-16]!
+	stp  x10, x11, [sp, #-16]!
+	stp  x12, x13, [sp, #-16]!
+	stp  x14, x15, [sp, #-16]!
+	stp  x16, x17, [sp, #-16]!
+	stp  x18, x30, [sp, #-16]!
+
+	mov  x4, x0			/* x4 = core mask */
+
+	/* restore scr_el3 */
+	mov  x0, x4
+	mov  x1, #SCR_EL3_DATA
+	bl   _getCoreData
+
+	/* x0 = saved scr_el3 */
+	msr  SCR_EL3, x0
+
+	/* x4 = core mask */
+
+	/* restore cpuectlr */
+	mov  x0, x4
+	mov  x1, #CPUECTLR_DATA
+	bl   _getCoreData
+
+	/* make sure smp is set */
+	orr  x0, x0, #CPUECTLR_SMPEN_MASK
+	msr  CORTEX_A72_ECTLR_EL1, x0
+
+	/* x4 = core mask */
+
+	/* SoC-specific cleanup */
+	mov  x0, x4
+	bl   _soc_sys_exit_pwrdn
+
+	/* restore the non-volatile registers */
+	ldp  x18, x30, [sp], #16
+	ldp  x16, x17, [sp], #16
+	ldp  x14, x15, [sp], #16
+	ldp  x12, x13, [sp], #16
+	ldp  x10, x11, [sp], #16
+	ldp  x8,  x9,  [sp], #16
+	ldp  x6,  x7,  [sp], #16
+	ldp  x4,  x5,  [sp], #16
+	b    psci_completed
+endfunc _psci_sys_exit_pwrdn
+
+#endif
+
+
+/* standard psci status returns - each helper loads a status code
+ * and exits through psci_completed (some may be referenced only
+ * from soc-specific code)
+ */
+func psci_disabled
+	ldr  w0, =PSCI_E_DISABLED
+	b    psci_completed
+endfunc psci_disabled
+
+
+func psci_not_present
+	ldr  w0, =PSCI_E_NOT_PRESENT
+	b    psci_completed
+endfunc psci_not_present
+
+
+func psci_on_pending
+	ldr  w0, =PSCI_E_ON_PENDING
+	b    psci_completed
+endfunc psci_on_pending
+
+
+func psci_already_on
+	ldr  w0, =PSCI_E_ALREADY_ON
+	b    psci_completed
+endfunc psci_already_on
+
+
+func psci_failure
+	ldr  w0, =PSCI_E_INTERN_FAIL
+	b    psci_completed
+endfunc psci_failure
+
+
+func psci_unimplemented
+	ldr  w0, =PSCI_E_NOT_SUPPORTED
+	b    psci_completed
+endfunc psci_unimplemented
+
+
+func psci_denied
+	ldr  w0, =PSCI_E_DENIED
+	b    psci_completed
+endfunc psci_denied
+
+
+func psci_invalid
+	ldr  w0, =PSCI_E_INVALID_PARAMS
+	b    psci_completed
+endfunc psci_invalid
+
+
+func psci_success
+	mov  x0, #PSCI_E_SUCCESS
+	/* falls through to psci_completed below */
+endfunc psci_success
+
+
+func psci_completed
+	/* x0 = status code */
+	ret
+endfunc psci_completed
diff --git a/plat/nxp/common/psci/include/plat_psci.h b/plat/nxp/common/psci/include/plat_psci.h
new file mode 100644
index 0000000..97d4c97
--- /dev/null
+++ b/plat/nxp/common/psci/include/plat_psci.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_PSCI_H
+#define PLAT_PSCI_H
+
+ /* core abort current op */
+#define CORE_ABORT_OP     0x1
+
+ /* psci power levels - these index the pwr_domain_state array
+  * in psci_power_state_t by affinity level
+  */
+#define PLAT_CORE_LVL  PSCI_CPU_PWR_LVL
+#define PLAT_CLSTR_LVL U(1)
+#define PLAT_SYS_LVL   U(2)
+#define PLAT_MAX_LVL   PLAT_SYS_LVL
+
+ /* core state */
+ /* OFF states 0x0 - 0xF */
+#define CORE_IN_RESET     0x0
+#define CORE_DISABLED     0x1
+#define CORE_OFF          0x2
+#define CORE_STANDBY      0x3
+#define CORE_PWR_DOWN     0x4
+#define CORE_WFE          0x6
+#define CORE_WFI          0x7
+#define CORE_LAST	  0x8
+#define CORE_OFF_PENDING  0x9
+#define CORE_WORKING_INIT 0xA
+#define SYS_OFF_PENDING   0xB
+#define SYS_OFF           0xC
+
+ /* ON states 0x10 - 0x1F */
+#define CORE_PENDING      0x10
+#define CORE_RELEASED     0x11
+#define CORE_WAKEUP       0x12
+ /* highest off state */
+#define CORE_OFF_MAX	  0xF
+ /* lowest on state */
+#define CORE_ON_MIN       CORE_PENDING
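+
+ /* a state value <= CORE_OFF_MAX is an OFF state; a value
+  * >= CORE_ON_MIN is an ON state
+  */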
+
+#define  DAIF_SET_MASK          0x3C0
+#define  SCTLR_I_C_M_MASK       0x00001005
+#define  SCTLR_C_MASK           0x00000004
+#define  SCTLR_I_MASK           0x00001000
+#define  CPUACTLR_L1PCTL_MASK   0x0000E000
+#define  DCSR_RCPM2_BASE        0x20170000
+#define  CPUECTLR_SMPEN_MASK    0x40
+#define  CPUECTLR_SMPEN_EN      0x40
+#define  CPUECTLR_RET_MASK      0x7
+#define  CPUECTLR_RET_SET       0x2
+#define  CPUECTLR_TIMER_MASK    0x7
+#define  CPUECTLR_TIMER_8TICKS  0x2
+#define  SCR_IRQ_MASK           0x2
+#define  SCR_FIQ_MASK           0x4
+
+/* pwr mgmt features supported in the soc-specific code:
+ *   value == 0x0, the soc code does not support this feature
+ *   value != 0x0, the soc code supports this feature
+ */
+#define SOC_CORE_RELEASE      0x1
+#define SOC_CORE_RESTART      0x1
+#define SOC_CORE_OFF          0x1
+#define SOC_CORE_STANDBY      0x1
+#define SOC_CORE_PWR_DWN      0x1
+#define SOC_CLUSTER_STANDBY   0x1
+#define SOC_CLUSTER_PWR_DWN   0x1
+#define SOC_SYSTEM_STANDBY    0x1
+#define SOC_SYSTEM_PWR_DWN    0x1
+#define SOC_SYSTEM_OFF        0x1
+#define SOC_SYSTEM_RESET      0x1
+#define SOC_SYSTEM_RESET2     0x1
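+
+/* note: these values are fixed at build time, so the #if guards
+ * here and in psci_utils.S statically include or exclude each
+ * handler
+ */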
+
+#ifndef __ASSEMBLER__
+
+void __dead2 _psci_system_reset(void);
+void __dead2 _psci_system_off(void);
+int _psci_cpu_on(u_register_t core_mask);
+void _psci_cpu_prep_off(u_register_t core_mask);
+void __dead2 _psci_cpu_off_wfi(u_register_t core_mask,
+				u_register_t wakeup_address);
+void __dead2 _psci_cpu_pwrdn_wfi(u_register_t core_mask,
+				u_register_t wakeup_address);
+void __dead2 _psci_sys_pwrdn_wfi(u_register_t core_mask,
+				u_register_t wakeup_address);
+void _psci_wakeup(u_register_t core_mask);
+void _psci_core_entr_stdby(u_register_t core_mask);
+void _psci_core_prep_stdby(u_register_t core_mask);
+void _psci_core_exit_stdby(u_register_t core_mask);
+void _psci_core_prep_pwrdn(u_register_t core_mask);
+void _psci_core_exit_pwrdn(u_register_t core_mask);
+void _psci_clstr_prep_stdby(u_register_t core_mask);
+void _psci_clstr_exit_stdby(u_register_t core_mask);
+void _psci_clstr_prep_pwrdn(u_register_t core_mask);
+void _psci_clstr_exit_pwrdn(u_register_t core_mask);
+void _psci_sys_prep_stdby(u_register_t core_mask);
+void _psci_sys_exit_stdby(u_register_t core_mask);
+void _psci_sys_prep_pwrdn(u_register_t core_mask);
+void _psci_sys_exit_pwrdn(u_register_t core_mask);
+
+#endif
+
+#endif /* PLAT_PSCI_H */
diff --git a/plat/nxp/common/psci/plat_psci.c b/plat/nxp/common/psci/plat_psci.c
new file mode 100644
index 0000000..9281e97
--- /dev/null
+++ b/plat/nxp/common/psci/plat_psci.c
@@ -0,0 +1,475 @@
+/*
+ * Copyright 2018-2020 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <common/debug.h>
+
+#include <plat_gic.h>
+#include <plat_common.h>
+#include <plat_psci.h>
+#ifdef NXP_WARM_BOOT
+#include <plat_warm_rst.h>
+#endif
+
+#include <platform_def.h>
+
+#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN)
+static void __dead2 _no_return_wfi(void)
+{
+	/* park the core in an endless wfi loop */
+	while (1) {
+		wfi();
+	}
+}
+#endif
+
+#if (SOC_CORE_RELEASE || SOC_CORE_PWR_DWN)
+ /* the entry for core warm boot */
+static uintptr_t warmboot_entry = (uintptr_t) NULL;
+#endif
+
+#if (SOC_CORE_RELEASE)
+static int _pwr_domain_on(u_register_t mpidr)
+{
+	int core_pos = plat_core_pos(mpidr);
+	int rc = PSCI_E_INVALID_PARAMS;
+	u_register_t core_mask;
+
+	if (core_pos >= 0 && core_pos < PLATFORM_CORE_COUNT) {
+
+		_soc_set_start_addr(warmboot_entry);
+
+		dsb();
+		isb();
+
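+		/* one-hot mask: bit N selects core N */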
+		core_mask = (1 << core_pos);
+		rc = _psci_cpu_on(core_mask);
+	}
+
+	return (rc);
+}
+#endif
+
+#if (SOC_CORE_OFF)
+static void _pwr_domain_off(const psci_power_state_t *target_state)
+{
+	u_register_t core_mask  = plat_my_core_mask();
+	u_register_t core_state;
+
+	 /* set core state in internal data */
+	core_state = CORE_OFF_PENDING;
+	_setCoreState(core_mask, core_state);
+
+	_psci_cpu_prep_off(core_mask);
+}
+#endif
+
+#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN)
+static void __dead2 _pwr_down_wfi(const psci_power_state_t *target_state)
+{
+	u_register_t core_mask  = plat_my_core_mask();
+	u_register_t core_state = _getCoreState(core_mask);
+
+	switch (core_state) {
+#if (SOC_CORE_OFF)
+	case CORE_OFF_PENDING:
+		/* set core state in internal data */
+		core_state = CORE_OFF;
+		_setCoreState(core_mask, core_state);
+
+		 /* turn the core off */
+		_psci_cpu_off_wfi(core_mask, warmboot_entry);
+		break;
+#endif
+#if (SOC_CORE_PWR_DWN)
+	case CORE_PWR_DOWN:
+		 /* power-down the core */
+		_psci_cpu_pwrdn_wfi(core_mask, warmboot_entry);
+		break;
+#endif
+#if (SOC_SYSTEM_PWR_DWN)
+	case SYS_OFF_PENDING:
+		/* set core state in internal data */
+		core_state = SYS_OFF;
+		_setCoreState(core_mask, core_state);
+
+		/* power-down the system */
+		_psci_sys_pwrdn_wfi(core_mask, warmboot_entry);
+		break;
+#endif
+	default:
+		/* unexpected core state - park the core */
+		_no_return_wfi();
+		break;
+	}
+}
+#endif
+
+#if (SOC_CORE_RELEASE || SOC_CORE_RESTART)
+static void _pwr_domain_wakeup(const psci_power_state_t *target_state)
+{
+	u_register_t core_mask  = plat_my_core_mask();
+	u_register_t core_state = _getCoreState(core_mask);
+
+	switch (core_state) {
+	case CORE_PENDING: /* this core is coming out of reset */
+
+		 /* soc per cpu setup */
+		soc_init_percpu();
+
+		 /* gic per cpu setup */
+		plat_gic_pcpu_init();
+
+		 /* set core state in internal data */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+		break;
+
+#if (SOC_CORE_RESTART)
+	case CORE_WAKEUP:
+
+		 /* this core is waking up from OFF */
+		_psci_wakeup(core_mask);
+
+		 /* set core state in internal data */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+
+		break;
+#endif
+	}
+}
+#endif
+
+#if (SOC_CORE_STANDBY)
+static void _pwr_cpu_standby(plat_local_state_t cpu_state)
+{
+	u_register_t core_mask  = plat_my_core_mask();
+	u_register_t core_state;
+
+	if (cpu_state == PLAT_MAX_RET_STATE) {
+
+		/* set core state to standby */
+		core_state = CORE_STANDBY;
+		_setCoreState(core_mask, core_state);
+
+		_psci_core_entr_stdby(core_mask);
+
+		/* when we are here, the core is waking up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+	}
+}
+#endif
+
+#if (SOC_CORE_PWR_DWN)
+static void _pwr_suspend(const psci_power_state_t *state)
+{
+
+	u_register_t core_mask  = plat_my_core_mask();
+	u_register_t core_state;
+
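+	/* check the deepest affected power domain first - only the
+	 * prep routine for the deepest requested level runs; the same
+	 * ordering applies in _pwr_suspend_finish below
+	 */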
+	if (state->pwr_domain_state[PLAT_MAX_LVL] == PLAT_MAX_OFF_STATE) {
+#if (SOC_SYSTEM_PWR_DWN)
+		_psci_sys_prep_pwrdn(core_mask);
+
+		 /* set core state */
+		core_state = SYS_OFF_PENDING;
+		_setCoreState(core_mask, core_state);
+#endif
+	} else if (state->pwr_domain_state[PLAT_MAX_LVL]
+				== PLAT_MAX_RET_STATE) {
+#if (SOC_SYSTEM_STANDBY)
+		_psci_sys_prep_stdby(core_mask);
+
+		 /* set core state */
+		core_state = CORE_STANDBY;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
+					PLAT_MAX_OFF_STATE) {
+#if (SOC_CLUSTER_PWR_DWN)
+		_psci_clstr_prep_pwrdn(core_mask);
+
+		 /* set core state */
+		core_state = CORE_PWR_DOWN;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
+					PLAT_MAX_RET_STATE) {
+#if (SOC_CLUSTER_STANDBY)
+		_psci_clstr_prep_stdby(core_mask);
+
+		 /* set core state */
+		core_state = CORE_STANDBY;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_OFF_STATE) {
+#if (SOC_CORE_PWR_DWN)
+		 /* prep the core for power-down */
+		_psci_core_prep_pwrdn(core_mask);
+
+		 /* set core state */
+		core_state = CORE_PWR_DOWN;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_RET_STATE) {
+#if (SOC_CORE_STANDBY)
+		_psci_core_prep_stdby(core_mask);
+
+		 /* set core state */
+		core_state = CORE_STANDBY;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+}
+#endif
+
+#if (SOC_CORE_PWR_DWN)
+static void _pwr_suspend_finish(const psci_power_state_t *state)
+{
+
+	u_register_t core_mask  = plat_my_core_mask();
+	u_register_t core_state;
+
+
+	if (state->pwr_domain_state[PLAT_MAX_LVL] == PLAT_MAX_OFF_STATE) {
+#if (SOC_SYSTEM_PWR_DWN)
+		_psci_sys_exit_pwrdn(core_mask);
+
+		/* when we are here, the core is back up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+#endif
+	} else if (state->pwr_domain_state[PLAT_MAX_LVL]
+				== PLAT_MAX_RET_STATE) {
+#if (SOC_SYSTEM_STANDBY)
+		_psci_sys_exit_stdby(core_mask);
+
+		/* when we are here, the core is waking up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
+						PLAT_MAX_OFF_STATE) {
+#if (SOC_CLUSTER_PWR_DWN)
+		_psci_clstr_exit_pwrdn(core_mask);
+
+		/* when we are here, the core is waking up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
+						PLAT_MAX_RET_STATE) {
+#if (SOC_CLUSTER_STANDBY)
+		_psci_clstr_exit_stdby(core_mask);
+
+		/* when we are here, the core is waking up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_OFF_STATE) {
+#if (SOC_CORE_PWR_DWN)
+		_psci_core_exit_pwrdn(core_mask);
+
+		/* when we are here, the core is back up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+	else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_RET_STATE) {
+#if (SOC_CORE_STANDBY)
+		_psci_core_exit_stdby(core_mask);
+
+		/* when we are here, the core is waking up
+		 * set core state to released
+		 */
+		core_state = CORE_RELEASED;
+		_setCoreState(core_mask, core_state);
+#endif
+	}
+
+}
+#endif
+
+#if (SOC_CORE_STANDBY || SOC_CORE_PWR_DWN)
+
+#define PWR_STATE_TYPE_MASK    0x00010000
+#define PWR_STATE_TYPE_STNDBY  0x0
+#define PWR_STATE_TYPE_PWRDWN  0x00010000
+#define PWR_STATE_LVL_MASK     0x03000000
+#define PWR_STATE_LVL_CORE     0x0
+#define PWR_STATE_LVL_CLSTR    0x01000000
+#define PWR_STATE_LVL_SYS      0x02000000
+#define PWR_STATE_LVL_MAX      0x03000000
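+
+ /* fields of the PSCI CPU_SUSPEND power_state parameter (original
+  * format): bit[16] is the state type (0 = standby, 1 = power-down)
+  * and bits[25:24] encode the target power level
+  */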
+
+ /* turns a requested power state into a target power state
+  * based on SoC capabilities
+  */
+static int _pwr_state_validate(uint32_t pwr_state,
+				    psci_power_state_t *state)
+{
+	int stat   = PSCI_E_INVALID_PARAMS;
+	int pwrdn  = (pwr_state & PWR_STATE_TYPE_MASK);
+	int lvl    = (pwr_state & PWR_STATE_LVL_MASK);
+
+	switch (lvl) {
+	case PWR_STATE_LVL_MAX:
+		if (pwrdn && SOC_SYSTEM_PWR_DWN)
+			state->pwr_domain_state[PLAT_MAX_LVL] =
+				PLAT_MAX_OFF_STATE;
+		else if (SOC_SYSTEM_STANDBY)
+			state->pwr_domain_state[PLAT_MAX_LVL] =
+				PLAT_MAX_RET_STATE;
+		 /* intentional fall-through */
+	case PWR_STATE_LVL_SYS:
+		if (pwrdn && SOC_SYSTEM_PWR_DWN)
+			state->pwr_domain_state[PLAT_SYS_LVL] =
+				PLAT_MAX_OFF_STATE;
+		else if (SOC_SYSTEM_STANDBY)
+			state->pwr_domain_state[PLAT_SYS_LVL] =
+				PLAT_MAX_RET_STATE;
+		 /* intentional fall-through */
+	case PWR_STATE_LVL_CLSTR:
+		if (pwrdn && SOC_CLUSTER_PWR_DWN)
+			state->pwr_domain_state[PLAT_CLSTR_LVL] =
+				PLAT_MAX_OFF_STATE;
+		else if (SOC_CLUSTER_STANDBY)
+			state->pwr_domain_state[PLAT_CLSTR_LVL] =
+				PLAT_MAX_RET_STATE;
+		 /* intentional fall-through */
+	case PWR_STATE_LVL_CORE:
+		stat = PSCI_E_SUCCESS;
+
+		if (pwrdn && SOC_CORE_PWR_DWN)
+			state->pwr_domain_state[PLAT_CORE_LVL] =
+				PLAT_MAX_OFF_STATE;
+		else if (SOC_CORE_STANDBY)
+			state->pwr_domain_state[PLAT_CORE_LVL] =
+				PLAT_MAX_RET_STATE;
+		break;
+	}
+	return (stat);
+}
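+
+/* example: pwr_state == 0x02010000 requests power-down at the system
+ * level; the intentional fall-through above then marks the cluster
+ * and core domains for power-down as well
+ */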
+
+#endif
+
+#if (SOC_SYSTEM_PWR_DWN)
+static void _pwr_state_sys_suspend(psci_power_state_t *req_state)
+{
+
+	/* if per-SoC settings are needed, extend this by calling
+	 * into psci_utils.S and from there down to the SoC-specific
+	 * files
+	 */
+
+	req_state->pwr_domain_state[PLAT_MAX_LVL]   = PLAT_MAX_OFF_STATE;
+	req_state->pwr_domain_state[PLAT_SYS_LVL]   = PLAT_MAX_OFF_STATE;
+	req_state->pwr_domain_state[PLAT_CLSTR_LVL] = PLAT_MAX_OFF_STATE;
+	req_state->pwr_domain_state[PLAT_CORE_LVL]  = PLAT_MAX_OFF_STATE;
+
+}
+#endif
+
+#if defined(NXP_WARM_BOOT) && (SOC_SYSTEM_RESET2)
+static int psci_system_reset2(int is_vendor,
+			      int reset_type,
+			      u_register_t cookie)
+{
+	int ret = 0;
+
+	INFO("Executing warm reset sequence.\n");
+	ret = prep_n_execute_warm_reset();
+
+	return ret;
+}
+#endif
+
+static plat_psci_ops_t _psci_pm_ops = {
+#if (SOC_SYSTEM_OFF)
+	.system_off = _psci_system_off,
+#endif
+#if (SOC_SYSTEM_RESET)
+	.system_reset = _psci_system_reset,
+#endif
+#if defined(NXP_WARM_BOOT) && (SOC_SYSTEM_RESET2)
+	.system_reset2 = psci_system_reset2,
+#endif
+#if (SOC_CORE_RELEASE || SOC_CORE_RESTART)
+	 /* core released or restarted */
+	.pwr_domain_on_finish = _pwr_domain_wakeup,
+#endif
+#if (SOC_CORE_OFF)
+	 /* core shutting down */
+	.pwr_domain_off	= _pwr_domain_off,
+#endif
+#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN)
+	.pwr_domain_pwr_down_wfi = _pwr_down_wfi,
+#endif
+#if (SOC_CORE_STANDBY || SOC_CORE_PWR_DWN)
+	 /* cpu_suspend */
+	.validate_power_state = _pwr_state_validate,
+#if (SOC_CORE_STANDBY)
+	.cpu_standby = _pwr_cpu_standby,
+#endif
+#if (SOC_CORE_PWR_DWN)
+	.pwr_domain_suspend        = _pwr_suspend,
+	.pwr_domain_suspend_finish = _pwr_suspend_finish,
+#endif
+#endif
+#if (SOC_SYSTEM_PWR_DWN)
+	.get_sys_suspend_power_state = _pwr_state_sys_suspend,
+#endif
+#if (SOC_CORE_RELEASE)
+	 /* core executing psci_cpu_on */
+	.pwr_domain_on	= _pwr_domain_on
+#endif
+};
+
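+/* standard hook called by lib/psci (psci_setup) to retrieve the
+ * platform's psci handlers; sec_entrypoint is the warm-boot entry
+ * address used when a core is released or woken
+ */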
+#if (SOC_CORE_RELEASE || SOC_CORE_PWR_DWN)
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	warmboot_entry = sec_entrypoint;
+	*psci_ops = &_psci_pm_ops;
+	return 0;
+}
+
+#else
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	*psci_ops = &_psci_pm_ops;
+	return 0;
+}
+#endif
diff --git a/plat/nxp/common/psci/psci.mk b/plat/nxp/common/psci/psci.mk
new file mode 100644
index 0000000..a2791c2
--- /dev/null
+++ b/plat/nxp/common/psci/psci.mk
@@ -0,0 +1,35 @@
+#
+# Copyright 2018-2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+#------------------------------------------------------------------------------
+#
+# Select the PSCI files
+#
+# -----------------------------------------------------------------------------
+
+ifeq (${ADD_PSCI},)
+
+ADD_PSCI		:= 1
+PLAT_PSCI_PATH		:= $(PLAT_COMMON_PATH)/psci
+
+PSCI_SOURCES		:= ${PLAT_PSCI_PATH}/plat_psci.c	\
+			   ${PLAT_PSCI_PATH}/$(ARCH)/psci_utils.S	\
+			   plat/common/plat_psci_common.c
+
+PLAT_INCLUDES		+= -I${PLAT_PSCI_PATH}/include
+
+ifeq (${BL_COMM_PSCI_NEEDED},yes)
+BL_COMMON_SOURCES	+= ${PSCI_SOURCES}
+else
+ifeq (${BL2_PSCI_NEEDED},yes)
+BL2_SOURCES		+= ${PSCI_SOURCES}
+endif
+ifeq (${BL31_PSCI_NEEDED},yes)
+BL31_SOURCES		+= ${PSCI_SOURCES}
+endif
+endif
+endif
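+
+# Platform makefiles opt in by setting one of BL_COMM_PSCI_NEEDED,
+# BL2_PSCI_NEEDED or BL31_PSCI_NEEDED to "yes" before including this
+# file, for example (illustrative):
+#
+#   BL31_PSCI_NEEDED	:= yes
+#   include ${PLAT_COMMON_PATH}/psci/psci.mk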
+# -----------------------------------------------------------------------------