Merge "feat(mt8188): update SVP region ID protection flow" into integration
diff --git a/Makefile b/Makefile
index 2fc856a..64bccbc 100644
--- a/Makefile
+++ b/Makefile
@@ -457,6 +457,9 @@
 			ifeq ($(SPMC_AT_EL3),1)
                                 $(error SPM cannot be enabled in both S-EL2 and EL3.)
 			endif
+			ifeq ($(CTX_INCLUDE_SVE_REGS),1)
+                                $(error SVE context management not needed with Hafnium SPMC.)
+			endif
 		endif
 
 		ifeq ($(findstring optee_sp,$(ARM_SPMC_MANIFEST_DTS)),optee_sp)
@@ -975,25 +978,52 @@
 	endif
 endif #(ENABLE_SME_FOR_SWD)
 
+# Enabling SVE for SWD requires enabling SVE for NWD due to the ENABLE_FEAT
+# mechanism.
 ifeq (${ENABLE_SVE_FOR_SWD},1)
-	ifeq (${ENABLE_SVE_FOR_NS},0)
-                $(error "ENABLE_SVE_FOR_SWD requires ENABLE_SVE_FOR_NS")
-	endif
-endif #(ENABLE_SVE_FOR_SWD)
+    ifeq (${ENABLE_SVE_FOR_NS},0)
+        $(error "ENABLE_SVE_FOR_SWD requires ENABLE_SVE_FOR_NS")
+    endif
+endif
 
-# SVE and SME cannot be used with CTX_INCLUDE_FPREGS since secure manager does
-# its own context management including FPU registers.
-ifeq (${CTX_INCLUDE_FPREGS},1)
-	ifneq (${ENABLE_SME_FOR_NS},0)
-                $(error "ENABLE_SME_FOR_NS cannot be used with CTX_INCLUDE_FPREGS")
-	endif
+# Enabling SVE for both worlds typically requires context management of the
+# SVE registers. The only exception is an SPMC at S-EL2.
+ifeq (${ENABLE_SVE_FOR_SWD}, 1)
+    ifneq (${ENABLE_SVE_FOR_NS}, 0)
+        ifeq (${CTX_INCLUDE_SVE_REGS}-$(SPMD_SPM_AT_SEL2),0-0)
+            $(warning "ENABLE_SVE_FOR_SWD and ENABLE_SVE_FOR_NS together require CTX_INCLUDE_SVE_REGS")
+        endif
+    endif
+endif
 
-	ifeq (${ENABLE_SVE_FOR_NS},1)
-		# Warning instead of error due to CI dependency on this
-                $(warning "ENABLE_SVE_FOR_NS cannot be used with CTX_INCLUDE_FPREGS")
-                $(warning "Forced ENABLE_SVE_FOR_NS=0")
-		override ENABLE_SVE_FOR_NS	:= 0
-	endif
+# Enabling SVE in either world while enabling CTX_INCLUDE_FPREGS requires
+# CTX_INCLUDE_SVE_REGS to be enabled, due to the architectural dependency
+# between the FP and SVE registers.
+ifeq (${CTX_INCLUDE_FPREGS}, 1)
+    ifneq (${ENABLE_SVE_FOR_NS},0)
+        ifeq (${CTX_INCLUDE_SVE_REGS},0)
+            # Warning instead of error due to CI dependency on this
+            $(warning "CTX_INCLUDE_FPREGS and ENABLE_SVE_FOR_NS together require CTX_INCLUDE_SVE_REGS")
+            $(warning "Forced ENABLE_SVE_FOR_NS=0")
+            override ENABLE_SVE_FOR_NS := 0
+        endif
+    endif
+endif #(CTX_INCLUDE_FPREGS)
+
+# SVE context management is only required if the Secure world has access to
+# SVE/FP functionality.
+ifeq (${CTX_INCLUDE_SVE_REGS},1)
+    ifeq (${ENABLE_SVE_FOR_SWD},0)
+        $(error "CTX_INCLUDE_SVE_REGS requires ENABLE_SVE_FOR_SWD to also be enabled")
+    endif
+endif
+
+# SME cannot be used with CTX_INCLUDE_FPREGS since the SPM does its own context
+# management, including the FPU registers.
+ifeq (${CTX_INCLUDE_FPREGS},1)
+    ifneq (${ENABLE_SME_FOR_NS},0)
+        $(error "ENABLE_SME_FOR_NS cannot be used with CTX_INCLUDE_FPREGS")
+    endif
 endif #(CTX_INCLUDE_FPREGS)
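The constraints above follow from the architecture: the Advanced SIMD ``V`` registers alias the low 128 bits of the SVE ``Z`` registers, so saving only the FP/SIMD state is not enough once either world may use SVE. The sketch below is illustrative only; the type and helper names (``simd_regs_t``, ``save_vregs()``, ``save_zregs()``, ``save_pregs()``) are hypothetical and do not reflect the actual ``lib/el3_runtime/simd_ctx.c`` implementation.

.. code:: c

   /*
    * Illustrative sketch only: the names below are hypothetical and are not
    * the TF-A simd_ctx.c API. CTX_INCLUDE_SVE_REGS is assumed to be passed
    * by the build system as 0 or 1.
    */
   #include <stdint.h>

   typedef struct {
       uint64_t vregs[32][2];          /* V0-V31, 128 bits each */
   #if CTX_INCLUDE_SVE_REGS
       uint8_t zregs[32][256];         /* Z0-Z31, up to 2048 bits each */
       uint8_t pregs[16][32];          /* P0-P15 predicate registers */
   #endif
   } simd_regs_t;

   /* Hypothetical low-level helpers, assumed to be implemented in assembly. */
   void save_vregs(uint64_t (*vregs)[2]);
   void save_zregs(uint8_t (*zregs)[256]);
   void save_pregs(uint8_t (*pregs)[32]);

   void save_simd_context(simd_regs_t *regs)
   {
       /* The FP/SIMD view of the registers is always captured... */
       save_vregs(regs->vregs);
   #if CTX_INCLUDE_SVE_REGS
       /*
        * ...and, when SVE context management is compiled in, the full Z/P
        * state as well, since V0-V31 alias the low 128 bits of Z0-Z31.
        */
       save_zregs(regs->zregs);
       save_pregs(regs->pregs);
   #endif
   }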
 
 ifeq ($(DRTM_SUPPORT),1)
@@ -1130,6 +1160,7 @@
 	CREATE_KEYS \
 	CTX_INCLUDE_AARCH32_REGS \
 	CTX_INCLUDE_FPREGS \
+	CTX_INCLUDE_SVE_REGS \
 	CTX_INCLUDE_EL2_REGS \
 	CTX_INCLUDE_MPAM_REGS \
 	DEBUG \
@@ -1168,6 +1199,7 @@
 	SEPARATE_CODE_AND_RODATA \
 	SEPARATE_BL2_NOLOAD_REGION \
 	SEPARATE_NOBITS_REGION \
+	SEPARATE_SIMD_SECTION \
 	SPIN_ON_BL1_EXIT \
 	SPM_MM \
 	SPMC_AT_EL3 \
@@ -1288,6 +1320,7 @@
 	COLD_BOOT_SINGLE_CPU \
 	CTX_INCLUDE_AARCH32_REGS \
 	CTX_INCLUDE_FPREGS \
+	CTX_INCLUDE_SVE_REGS \
 	CTX_INCLUDE_PAUTH_REGS \
 	CTX_INCLUDE_MPAM_REGS \
 	EL3_EXCEPTION_HANDLING \
@@ -1340,6 +1373,7 @@
 	SEPARATE_CODE_AND_RODATA \
 	SEPARATE_BL2_NOLOAD_REGION \
 	SEPARATE_NOBITS_REGION \
+	SEPARATE_SIMD_SECTION \
 	RECLAIM_INIT_CODE \
 	SPD_${SPD} \
 	SPIN_ON_BL1_EXIT \
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 7e9fde3..7dc71a2 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -42,10 +42,12 @@
 				bl31/bl31_context_mgmt.c			\
 				bl31/bl31_traps.c				\
 				common/runtime_svc.c				\
+				lib/cpus/errata_common.c			\
 				lib/cpus/aarch64/dsu_helpers.S			\
 				plat/common/aarch64/platform_mp_stack.S		\
 				services/arm_arch_svc/arm_arch_svc_setup.c	\
 				services/std_svc/std_svc_setup.c		\
+				lib/el3_runtime/simd_ctx.c			\
 				${PSCI_LIB_SOURCES}				\
 				${SPMD_SOURCES}					\
 				${SPM_MM_SOURCES}				\
diff --git a/changelog.yaml b/changelog.yaml
index 2b760c7..df0476f 100644
--- a/changelog.yaml
+++ b/changelog.yaml
@@ -805,6 +805,9 @@
           - title: RAS
             scope: ras
 
+          - title: SIMD
+            scope: simd
+
       - title: FCONF
         scope: fconf
 
diff --git a/docs/about/maintainers.rst b/docs/about/maintainers.rst
index cbed72f..8ac09ae 100644
--- a/docs/about/maintainers.rst
+++ b/docs/about/maintainers.rst
@@ -565,8 +565,8 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 :|M|: Abdellatif El Khlifi <abdellatif.elkhlifi@arm.com>
 :|G|: `abdellatif-elkhlifi`_
-:|M|: Xueliang Zhong <xueliang.zhong@arm.com>
-:|G|: `xueliang-zhong-arm`_
+:|M|: Hugues Kamba Mpiana <hugues.kambampiana@arm.com>
+:|G|: `hugues-kambampiana-arm`_
 :|F|: plat/arm/board/corstone700
 :|F|: plat/arm/board/a5ds
 :|F|: plat/arm/board/corstone1000
@@ -1050,6 +1050,7 @@
 .. _harrisonmutai-arm: https://github.com/harrisonmutai-arm
 .. _hilamirandakuzi1: https://github.com/hilamirandakuzi1
 .. _hzhuang1: https://github.com/hzhuang1
+.. _hugues-kambampiana-arm: https://github.com/hugueskamba
 .. _JackyBai: https://github.com/JackyBai
 .. _J-Alves: https://github.com/J-Alves
 .. _jason-ch-chen: https://github.com/jason-ch-chen
@@ -1112,7 +1113,6 @@
 .. _vijayenthiran-arm: https://github.com/vijayenthiran-arm
 .. _vishnu-banavath: https://github.com/vishnu-banavath
 .. _vwadekar: https://github.com/vwadekar
-.. _xueliang-zhong-arm: https://github.com/xueliang-zhong-arm
 .. _Yann-lms: https://github.com/Yann-lms
 
 --------------
diff --git a/docs/components/context-management-library.rst b/docs/components/context-management-library.rst
index 56ba2ec..266b82a 100644
--- a/docs/components/context-management-library.rst
+++ b/docs/components/context-management-library.rst
@@ -98,14 +98,15 @@
 
 4. **Dynamic discovery of Feature enablement by EL3**
 
-TF-A supports three states for feature enablement at EL3, to make them available
+TF-A supports four states for feature enablement at EL3, to make them available
 for lower exception levels.
 
 .. code:: c
 
-	#define FEAT_STATE_DISABLED	0
-	#define FEAT_STATE_ENABLED	1
-	#define FEAT_STATE_CHECK	2
+	#define FEAT_STATE_DISABLED     	0
+	#define FEAT_STATE_ENABLED      	1
+	#define FEAT_STATE_CHECK        	2
+	#define FEAT_STATE_CHECK_ASYMMETRIC	3
 
 A pattern is established for feature enablement behavior.
 Each feature must support the 3 possible values with rigid semantics.
@@ -119,7 +120,26 @@
 - **FEAT_STATE_CHECK** - same as ``FEAT_STATE_ALWAYS`` except that the feature's
   existence will be checked at runtime. Default on dynamic platforms (example: FVP).
 
-.. note::
+- **FEAT_STATE_CHECK_ASYMMETRIC** - same as ``FEAT_STATE_CHECK`` except that the feature's
+  presence may be asymmetric across cores, which requires the feature to be probed again
+  on the warm boot path. Note that only a limited number of features can be asymmetric;
+  see the sketch after the notes below.
+
+.. note::
+   Only a limited number of features can be ``FEAT_STATE_CHECK_ASYMMETRIC``. This is
+   because operating systems are designed for SMP systems.
+   There are no clear guidelines on what kind of mismatch is allowed, but the
+   following pointers can help in making a decision:
+
+   - All mandatory features must be symmetric.
+   - Any feature that impacts the generation of page tables must be symmetric.
+   - Any feature access which does not trap to EL3 should be symmetric.
+   - Features related to profiling, debug and trace could be asymmetric.
+   - Migration of vCPUs/tasks between CPUs should not cause an error.
+
+   Whenever asymmetric support is added for a feature, TF-A needs to add
+   feature-specific code in the context management library.
+
+.. note::
    ``FEAT_RAS`` is an exception here, as it impacts the execution of EL3 and
    it is essential to know its presence at compile time. Refer to ``ENABLE_FEAT``
    macro under :ref:`Build Options` section for more details.
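The sketch below shows how this enablement pattern is typically consumed, assuming a hypothetical feature ``FEAT_X`` with an ``is_feat_x_present()`` ID-register probe; it illustrates the pattern only and is not code taken from the library.

.. code:: c

   /* Sketch only: FEAT_X, ENABLE_FEAT_X and is_feat_x_present() are hypothetical. */
   #include <stdbool.h>

   #define FEAT_STATE_DISABLED            0
   #define FEAT_STATE_ENABLED             1
   #define FEAT_STATE_CHECK               2
   #define FEAT_STATE_CHECK_ASYMMETRIC    3

   #ifndef ENABLE_FEAT_X
   #define ENABLE_FEAT_X  FEAT_STATE_CHECK
   #endif

   static bool is_feat_x_present(void)
   {
       return true; /* would normally read the relevant ID register */
   }

   bool is_feat_x_supported(void)
   {
       if (ENABLE_FEAT_X == FEAT_STATE_DISABLED) {
           return false;  /* feature code is compiled out entirely */
       }
       if (ENABLE_FEAT_X == FEAT_STATE_ENABLED) {
           return true;   /* presence is assumed, no runtime probe */
       }
       /*
        * FEAT_STATE_CHECK / FEAT_STATE_CHECK_ASYMMETRIC: probe at runtime.
        * For the asymmetric case the probe must also run on the warm boot
        * path, because another core may report a different value.
        */
       return is_feat_x_present();
   }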
@@ -498,4 +518,4 @@
 .. |Context Init WarmBoot| image:: ../resources/diagrams/context_init_warmboot.png
 .. _Trustzone for AArch64: https://developer.arm.com/documentation/102418/0101/TrustZone-in-the-processor/Switching-between-Security-states
 .. _Security States with RME: https://developer.arm.com/documentation/den0126/0100/Security-states
-.. _lib/el3_runtime/(aarch32/aarch64): https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/tree/lib/el3_runtime
\ No newline at end of file
+.. _lib/el3_runtime/(aarch32/aarch64): https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/tree/lib/el3_runtime
diff --git a/docs/components/romlib-design.rst b/docs/components/romlib-design.rst
index 62c173a..c0f3ed3 100644
--- a/docs/components/romlib-design.rst
+++ b/docs/components/romlib-design.rst
@@ -71,6 +71,15 @@
 The "library at ROM" contains a necessary init function that initialises the
 global variables defined by the functions inside "library at ROM".
 
+Wrapper functions are specified at the link stage of compilation and cannot
+interpose upon functions within the same translation unit. For example, if
+function ``fn_a`` calls ``fn_b`` within translation unit ``functions.c`` and
+the romlib jump table includes an entry for ``fn_b``, ``fn_a`` will include
+a reference to ``fn_b``'s original program text instead of the wrapper. Thus
+the jump table author must take care when including public entry points in
+translation units, to avoid paying the program text cost twice: once in the
+original executable and once in romlib.
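A short C illustration of the limitation described above, using the same hypothetical ``fn_a``/``fn_b`` names:

.. code:: c

   /* functions.c -- sketch of the in-unit call that bypasses the wrapper */
   int fn_b(int x)
   {
       return x + 1;
   }

   int fn_a(int x)
   {
       /*
        * This call is resolved within the translation unit, so even if the
        * romlib jump table provides a wrapper for fn_b, fn_a still reaches
        * the original fn_b; only callers in other objects, resolved by the
        * linker, go through the wrapper.
        */
       return fn_b(x) * 2;
   }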
+
 Script
 ~~~~~~
 
@@ -86,7 +95,7 @@
 
 3. ``romlib_generator.py genwrappers [args]`` - Generates a wrapper function for
    each entry in the index file except for the ones that contain the keyword
-   ``patch``. The generated wrapper file is called ``<fn_name>.s``.
+   ``patch``. The generated wrapper file is called ``wrappers.s``.
 
 4. ``romlib_generator.py pre [args]`` - Preprocesses the index file which means
    it resolves all the include commands in the file recursively. It can also
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index 6147c1f..e6ca542 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -826,6 +826,10 @@
   feature is enabled and can assist the Kernel in the process of
   mitigation of the erratum.
 
+- ``ERRATA_X4_2726228``: This applies erratum 2726228 workaround to Cortex-X4
+  CPU. This needs to be enabled for revisions r0p0 and r0p1. It is fixed in
+  r0p2.
+
 -  ``ERRATA_X4_2740089``: This applies errata 2740089 workaround to Cortex-X4
    CPU. This needs to be enabled for revisions r0p0 and r0p1. It is fixed
    in r0p2.
@@ -833,6 +837,9 @@
 - ``ERRATA_X4_2763018``: This applies errata 2763018 workaround to Cortex-X4
   CPU. This needs to be enabled for revisions r0p0 and r0p1. It is fixed in r0p2.
 
+- ``ERRATA_X4_2816013``: This applies errata 2816013 workaround to Cortex-X4
+  CPU. This needs to be enabled for revisions r0p0 and r0p1. It is fixed in r0p2.
+
 For Cortex-A510, the following errata build flags are defined :
 
 -  ``ERRATA_A510_1922240``: This applies errata 1922240 workaround to
@@ -896,6 +903,10 @@
    Cortex-A520 CPU. This needs to be enabled for revisions r0p0 and r0p1.
    It is still open.
 
+-  ``ERRATA_A520_2938996``: This applies errata 2938996 workaround to
+   Cortex-A520 CPU. This needs to be enabled for revisions r0p0 and r0p1.
+   It is fixed in r0p2.
+
 For Cortex-A715, the following errata build flags are defined :
 
 -  ``ERRATA_A715_2331818``: This applies errata 2331818 workaround to
@@ -929,6 +940,14 @@
 
 For Cortex-A720, the following errata build flags are defined :
 
+-  ``ERRATA_A720_2792132``: This applies errata 2792132 workaround to
+   Cortex-A720 CPU. This needs to be enabled for revisions r0p0 and r0p1.
+   It is fixed in r0p2.
+
+-  ``ERRATA_A720_2844092``: This applies errata 2844092 workaround to
+   Cortex-A720 CPU. This needs to be enabled for revisions r0p0 and r0p1.
+   It is fixed in r0p2.
+
 -  ``ERRATA_A720_2926083``: This applies errata 2926083 workaround to
    Cortex-A720 CPU. This needs to be enabled for revisions r0p0 and r0p1.
    It is fixed in r0p2.
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index 4c070ed..be38e15 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -99,6 +99,10 @@
    file that contains the BL32 private key in PEM format or a PKCS11 URI. If
    ``SAVE_KEYS=1``, only a file is accepted and it will be used to save the key.
 
+-  ``RMM``: This is an optional build option used when ``ENABLE_RME`` is set.
+   It specifies the path to the RMM binary for the ``fip`` target. If the RMM
+   option is not specified, TF-A builds the TRP to load and run at R-EL2.
+
 -  ``BL33``: Path to BL33 image in the host file system. This is mandatory for
    ``fip`` target in case TF-A BL2 is used.
 
@@ -200,6 +204,13 @@
    Note that Pointer Authentication is enabled for Non-secure world irrespective
    of the value of this flag if the CPU supports it.
 
+-  ``CTX_INCLUDE_SVE_REGS``: Boolean option that, when set to 1, will cause the
+   SVE registers to be included when saving and restoring the CPU context. Note
+   that this build option requires ``ENABLE_SVE_FOR_SWD`` to be enabled. In
+   general, it is recommended to perform SVE context management in lower ELs
+   and skip it in EL3, due to the additional cost of maintaining large data
+   structures to track the SVE state. Hence, the default value is 0.
+
 -  ``DEBUG``: Chooses between a debug and release build. It can take either 0
    (release) or 1 (debug) as values. 0 is the default.
 
@@ -501,21 +512,26 @@
 
 -  ``ENABLE_SVE_FOR_NS``: Numeric value to enable Scalable Vector Extension
    (SVE) for the Non-secure world only. SVE is an optional architectural feature
-   for AArch64. Note that when SVE is enabled for the Non-secure world, access
-   to SIMD and floating-point functionality from the Secure world is disabled by
-   default and controlled with ENABLE_SVE_FOR_SWD.
-   This is to avoid corruption of the Non-secure world data in the Z-registers
-   which are aliased by the SIMD and FP registers. The build option is not
-   compatible with the ``CTX_INCLUDE_FPREGS`` build option, and will raise an
-   assert on platforms where SVE is implemented and ``ENABLE_SVE_FOR_NS``
-   enabled.  This flag can take the values 0 to 2, to align with the
-   ``ENABLE_FEAT`` mechanism. At this time, this build option cannot be
-   used on systems that have SPM_MM enabled. The default is 1.
+   for AArch64. This flag can take the values 0 to 2, to align with the
+   ``ENABLE_FEAT`` mechanism. At this time, this build option cannot be used on
+   systems that have SPM_MM enabled. The default value is 2.
 
--  ``ENABLE_SVE_FOR_SWD``: Boolean option to enable SVE for the Secure world.
-   SVE is an optional architectural feature for AArch64. Note that this option
-   requires ENABLE_SVE_FOR_NS to be enabled. The default is 0 and it is
-   automatically disabled when the target architecture is AArch32.
+   Note that when SVE is enabled for the Non-secure world, access to SVE, SIMD
+   and floating-point functionality from the Secure world is independently
+   controlled by the build option ``ENABLE_SVE_FOR_SWD``. When enabling
+   ``CTX_INCLUDE_FPREGS`` and ``ENABLE_SVE_FOR_NS`` together, it is mandatory to
+   enable ``CTX_INCLUDE_SVE_REGS``. This is to avoid corruption of the Non-secure
+   world data in the Z-registers, which are aliased by the SIMD and FP registers.
+
+-  ``ENABLE_SVE_FOR_SWD``: Boolean option to enable SVE and FPU/SIMD functionality
+   for the Secure world. SVE is an optional architectural feature for AArch64.
+   The default is 0 and it is automatically disabled when the target architecture
+   is AArch32.
+
+   .. note::
+      This build flag requires ``ENABLE_SVE_FOR_NS`` to be enabled. When enabling
+      ``ENABLE_SVE_FOR_SWD``, a developer must carefully consider whether
+      ``CTX_INCLUDE_SVE_REGS`` is also needed.
 
 -  ``ENABLE_STACK_PROTECTOR``: String option to enable the stack protection
    checks in GCC. Allowed values are "all", "strong", "default" and "none". The
@@ -881,6 +897,11 @@
    flag is disabled by default and NOLOAD sections are placed in RAM immediately
    following the loaded firmware image.
 
+-  ``SEPARATE_SIMD_SECTION``: Setting this option to ``1`` allows the SIMD context
+   data structures to be placed in a dedicated memory region, as decided by the
+   platform integrator. The default value is ``0``, which means the SIMD context
+   is placed in the BSS section of the EL3 firmware.
+
 -  ``SMC_PCI_SUPPORT``: This option allows platforms to handle PCI configuration
    access requests via a standard SMCCC defined in `DEN0115`_. When combined with
    UEFI+ACPI this can provide a certain amount of OS forward compatibility
diff --git a/docs/plat/rockchip.rst b/docs/plat/rockchip.rst
index 53f63b5..384cd73 100644
--- a/docs/plat/rockchip.rst
+++ b/docs/plat/rockchip.rst
@@ -11,6 +11,7 @@
 -  rk3368: Octa-Core Cortex-A53
 -  rk3399: Hexa-Core Cortex-A53/A72
 -  rk3566/rk3568: Quad-Core Cortex-A55
+-  rk3588: Octa-Core Cortex-A55/A76
 
 
 Boot Sequence
diff --git a/drivers/nxp/clk/s32cc/include/s32cc-clk-regs.h b/drivers/nxp/clk/s32cc/include/s32cc-clk-regs.h
index d62eed7..84e76f7 100644
--- a/drivers/nxp/clk/s32cc/include/s32cc-clk-regs.h
+++ b/drivers/nxp/clk/s32cc/include/s32cc-clk-regs.h
@@ -9,6 +9,7 @@
 
 #define FXOSC_BASE_ADDR			(0x40050000UL)
 #define ARMPLL_BASE_ADDR		(0x40038000UL)
+#define PERIPHPLL_BASE_ADDR		(0x4003C000UL)
 #define ARM_DFS_BASE_ADDR		(0x40054000UL)
 #define CGM0_BASE_ADDR			(0x40030000UL)
 #define CGM1_BASE_ADDR			(0x40034000UL)
diff --git a/drivers/nxp/clk/s32cc/s32cc_clk_drv.c b/drivers/nxp/clk/s32cc/s32cc_clk_drv.c
index e23d928..fed16a7 100644
--- a/drivers/nxp/clk/s32cc/s32cc_clk_drv.c
+++ b/drivers/nxp/clk/s32cc/s32cc_clk_drv.c
@@ -22,6 +22,7 @@
 struct s32cc_clk_drv {
 	uintptr_t fxosc_base;
 	uintptr_t armpll_base;
+	uintptr_t periphpll_base;
 	uintptr_t armdfs_base;
 	uintptr_t cgm0_base;
 	uintptr_t cgm1_base;
@@ -42,6 +43,7 @@
 	static struct s32cc_clk_drv driver = {
 		.fxosc_base = FXOSC_BASE_ADDR,
 		.armpll_base = ARMPLL_BASE_ADDR,
+		.periphpll_base = PERIPHPLL_BASE_ADDR,
 		.armdfs_base = ARM_DFS_BASE_ADDR,
 		.cgm0_base = CGM0_BASE_ADDR,
 		.cgm1_base = CGM1_BASE_ADDR,
@@ -91,6 +93,9 @@
 	case S32CC_ARM_PLL:
 		*base = drv->armpll_base;
 		break;
+	case S32CC_PERIPH_PLL:
+		*base = drv->periphpll_base;
+		break;
 	case S32CC_ARM_DFS:
 		*base = drv->armdfs_base;
 		break;
diff --git a/drivers/nxp/clk/s32cc/s32cc_clk_modules.c b/drivers/nxp/clk/s32cc/s32cc_clk_modules.c
index c4c73c7..45e2070 100644
--- a/drivers/nxp/clk/s32cc/s32cc_clk_modules.c
+++ b/drivers/nxp/clk/s32cc/s32cc_clk_modules.c
@@ -58,6 +58,13 @@
 				 S32CC_CLK_ARM_PLL_DFS1, 0, 0, 0);
 static struct s32cc_clk cgm0_mux0_clk = S32CC_MODULE_CLK(cgm0_mux0);
 
+static struct s32cc_clkmux cgm0_mux8 =
+	S32CC_SHARED_CLKMUX_INIT(S32CC_CGM0, 8, 3,
+				 S32CC_CLK_FIRC,
+				 S32CC_CLK_PERIPH_PLL_PHI3,
+				 S32CC_CLK_FXOSC, 0, 0);
+static struct s32cc_clk cgm0_mux8_clk = S32CC_MODULE_CLK(cgm0_mux8);
+
 /* XBAR */
 static struct s32cc_clk xbar_2x_clk =
 	S32CC_CHILD_CLK(cgm0_mux0_clk, 48 * MHZ, 800 * MHZ);
@@ -82,6 +89,14 @@
 static struct s32cc_clk xbar_div6_clk =
 	S32CC_FREQ_MODULE_CLK(xbar_div12, 4 * MHZ, 66666666);
 
+/* Linflex */
+static struct s32cc_clk linflex_baud_clk =
+	S32CC_CHILD_CLK(cgm0_mux8_clk, 19200, 133333333);
+static struct s32cc_fixed_div linflex_div =
+	S32CC_FIXED_DIV_INIT(linflex_baud_clk, 2);
+static struct s32cc_clk linflex_clk =
+	S32CC_FREQ_MODULE_CLK(linflex_div, 9600, 66666666);
+
 /* MC_CGM1 */
 static struct s32cc_clkmux cgm1_mux0 =
 	S32CC_SHARED_CLKMUX_INIT(S32CC_CGM1, 0, 3,
@@ -107,7 +122,24 @@
 	S32CC_FREQ_MODULE_CLK(a53_core_div10, S32CC_A53_MIN_FREQ / 10,
 			      S32CC_A53_MAX_FREQ / 10);
 
-static struct s32cc_clk *s32cc_hw_clk_list[13] = {
+/* PERIPH PLL */
+static struct s32cc_clkmux periph_pll_mux =
+	S32CC_CLKMUX_INIT(S32CC_PERIPH_PLL, 0, 2,
+			  S32CC_CLK_FIRC,
+			  S32CC_CLK_FXOSC, 0, 0, 0);
+static struct s32cc_clk periph_pll_mux_clk =
+	S32CC_MODULE_CLK(periph_pll_mux);
+static struct s32cc_pll periphpll =
+	S32CC_PLL_INIT(periph_pll_mux_clk, S32CC_PERIPH_PLL, 2);
+static struct s32cc_clk periph_pll_vco_clk =
+	S32CC_FREQ_MODULE_CLK(periphpll, 1300 * MHZ, 2 * GHZ);
+
+static struct s32cc_pll_out_div periph_pll_phi3_div =
+	S32CC_PLL_OUT_DIV_INIT(periphpll, 3);
+static struct s32cc_clk periph_pll_phi3_clk =
+	S32CC_FREQ_MODULE_CLK(periph_pll_phi3_div, 0, 133333333);
+
+static struct s32cc_clk *s32cc_hw_clk_list[22] = {
 	/* Oscillators */
 	[S32CC_CLK_ID(S32CC_CLK_FIRC)] = &firc_clk,
 	[S32CC_CLK_ID(S32CC_CLK_SIRC)] = &sirc_clk,
@@ -116,6 +148,8 @@
 	[S32CC_CLK_ID(S32CC_CLK_ARM_PLL_PHI0)] = &arm_pll_phi0_clk,
 	/* ARM DFS */
 	[S32CC_CLK_ID(S32CC_CLK_ARM_PLL_DFS1)] = &arm_dfs1_clk,
+	/* PERIPH PLL */
+	[S32CC_CLK_ID(S32CC_CLK_PERIPH_PLL_PHI3)] = &periph_pll_phi3_clk,
 };
 
 static struct s32cc_clk_array s32cc_hw_clocks = {
@@ -124,12 +158,16 @@
 	.n_clks = ARRAY_SIZE(s32cc_hw_clk_list),
 };
 
-static struct s32cc_clk *s32cc_arch_clk_list[13] = {
+static struct s32cc_clk *s32cc_arch_clk_list[18] = {
 	/* ARM PLL */
 	[S32CC_CLK_ID(S32CC_CLK_ARM_PLL_MUX)] = &arm_pll_mux_clk,
 	[S32CC_CLK_ID(S32CC_CLK_ARM_PLL_VCO)] = &arm_pll_vco_clk,
+	/* PERIPH PLL */
+	[S32CC_CLK_ID(S32CC_CLK_PERIPH_PLL_MUX)] = &periph_pll_mux_clk,
+	[S32CC_CLK_ID(S32CC_CLK_PERIPH_PLL_VCO)] = &periph_pll_vco_clk,
 	/* MC_CGM0 */
 	[S32CC_CLK_ID(S32CC_CLK_MC_CGM0_MUX0)] = &cgm0_mux0_clk,
+	[S32CC_CLK_ID(S32CC_CLK_MC_CGM0_MUX8)] = &cgm0_mux8_clk,
 	/* XBAR */
 	[S32CC_CLK_ID(S32CC_CLK_XBAR_2X)] = &xbar_2x_clk,
 	[S32CC_CLK_ID(S32CC_CLK_XBAR)] = &xbar_clk,
@@ -143,6 +181,9 @@
 	[S32CC_CLK_ID(S32CC_CLK_A53_CORE)] = &a53_core_clk,
 	[S32CC_CLK_ID(S32CC_CLK_A53_CORE_DIV2)] = &a53_core_div2_clk,
 	[S32CC_CLK_ID(S32CC_CLK_A53_CORE_DIV10)] = &a53_core_div10_clk,
+	/* Linflex */
+	[S32CC_CLK_ID(S32CC_CLK_LINFLEX)] = &linflex_clk,
+	[S32CC_CLK_ID(S32CC_CLK_LINFLEX_BAUD)] = &linflex_baud_clk,
 };
 
 static struct s32cc_clk_array s32cc_arch_clocks = {
diff --git a/drivers/nxp/clk/s32cc/s32cc_early_clks.c b/drivers/nxp/clk/s32cc/s32cc_early_clks.c
index 2c256a5..8c4a9e8 100644
--- a/drivers/nxp/clk/s32cc/s32cc_early_clks.c
+++ b/drivers/nxp/clk/s32cc/s32cc_early_clks.c
@@ -4,15 +4,18 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 #include <drivers/clk.h>
+#include <platform_def.h>
 #include <s32cc-clk-drv.h>
 #include <s32cc-clk-ids.h>
 #include <s32cc-clk-utils.h>
 
-#define S32CC_FXOSC_FREQ	(40U * MHZ)
-#define S32CC_ARM_PLL_VCO_FREQ	(2U * GHZ)
-#define S32CC_ARM_PLL_PHI0_FREQ	(1U * GHZ)
-#define S32CC_A53_FREQ		(1U * GHZ)
-#define S32CC_XBAR_2X_FREQ	(800U * MHZ)
+#define S32CC_FXOSC_FREQ		(40U * MHZ)
+#define S32CC_ARM_PLL_VCO_FREQ		(2U * GHZ)
+#define S32CC_ARM_PLL_PHI0_FREQ		(1U * GHZ)
+#define S32CC_A53_FREQ			(1U * GHZ)
+#define S32CC_XBAR_2X_FREQ		(800U * MHZ)
+#define S32CC_PERIPH_PLL_VCO_FREQ	(2U * GHZ)
+#define S32CC_PERIPH_PLL_PHI3_FREQ	UART_CLOCK_HZ
 
 static int enable_fxosc_clk(void)
 {
@@ -63,6 +66,38 @@
 	return ret;
 }
 
+static int enable_periph_pll(void)
+{
+	int ret;
+
+	ret = clk_set_parent(S32CC_CLK_PERIPH_PLL_MUX, S32CC_CLK_FXOSC);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = clk_set_rate(S32CC_CLK_PERIPH_PLL_VCO, S32CC_PERIPH_PLL_VCO_FREQ, NULL);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = clk_set_rate(S32CC_CLK_PERIPH_PLL_PHI3, S32CC_PERIPH_PLL_PHI3_FREQ, NULL);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = clk_enable(S32CC_CLK_PERIPH_PLL_VCO);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = clk_enable(S32CC_CLK_PERIPH_PLL_PHI3);
+	if (ret != 0) {
+		return ret;
+	}
+
+	return ret;
+}
+
 static int enable_a53_clk(void)
 {
 	int ret;
@@ -109,6 +144,23 @@
 		return ret;
 	}
 
+	return ret;
+}
+
+static int enable_uart_clk(void)
+{
+	int ret;
+
+	ret = clk_set_parent(S32CC_CLK_MC_CGM0_MUX8, S32CC_CLK_PERIPH_PLL_PHI3);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = clk_enable(S32CC_CLK_LINFLEX_BAUD);
+	if (ret != 0) {
+		return ret;
+	}
+
 	return ret;
 }
 
@@ -128,6 +180,11 @@
 		return ret;
 	}
 
+	ret = enable_periph_pll();
+	if (ret != 0) {
+		return ret;
+	}
+
 	ret = enable_a53_clk();
 	if (ret != 0) {
 		return ret;
@@ -138,5 +195,10 @@
 		return ret;
 	}
 
+	ret = enable_uart_clk();
+	if (ret != 0) {
+		return ret;
+	}
+
 	return ret;
 }
diff --git a/drivers/nxp/console/linflex_console.S b/drivers/nxp/console/linflex_console.S
index abcbb59..d8c10ef 100644
--- a/drivers/nxp/console/linflex_console.S
+++ b/drivers/nxp/console/linflex_console.S
@@ -18,6 +18,7 @@
 
 #define LINFLEX_LINSR		(0x8)
 #define LINSR_LINS_INITMODE	(0x00001000)
+#define LINSR_LINS_RX_TX_MODE	(0x00008000)
 #define LINSR_LINS_MASK		(0x0000F000)
 
 #define LINFLEX_UARTCR		(0x10)
@@ -48,9 +49,11 @@
  */
 .globl console_linflex_core_init
 .globl console_linflex_core_putc
+.globl console_linflex_core_flush
 
 .globl console_linflex_register
 .globl console_linflex_putc
+.globl console_linflex_flush
 
 /**
  * uint32_t get_ldiv_mult(uintptr_t baseaddr, uint32_t clock,
@@ -175,10 +178,29 @@
 	str	x0, [x3, #CONSOLE_T_BASE]
 
 	mov	x0, x3
-	finish_console_register linflex, putc=1, getc=0, flush=0
+	finish_console_register linflex, putc=1, getc=0, flush=1
 endfunc console_linflex_register
 
 /**
+ * int console_linflex_core_flush(uintptr_t baseaddr);
+ *
+ * Loop while the TX fifo is not empty, depending on the selected UART mode.
+ *
+ * In:  x0 - Linflex base address
+ * Clobber list : x0 - x1
+ */
+func console_linflex_core_flush
+wait_rx_tx:
+	ldr	w1, [x0, LINFLEX_LINSR]
+	and	w1, w1, #LINSR_LINS_MASK
+	cmp	w1, #LINSR_LINS_RX_TX_MODE
+	b.eq	wait_rx_tx
+
+	mov	x0, #0
+	ret
+endfunc console_linflex_core_flush
+
+/**
  * int console_linflex_core_putc(int c, uintptr_t baseaddr);
 
  * Out: w0 - printed character on success, < 0 on error.
@@ -257,3 +279,21 @@
 	mov	x0, #-EINVAL
 	ret
 endfunc console_linflex_putc
+
+/**
+ * int console_linflex_flush(console_t *console);
+ *
+ * Function to wait for the TX FIFO to be cleared.
+ * In : x0 - pointer to console_t struct
+ * Out: x0 - 0 on success, -EINVAL on error.
+ * Clobber list : x0 - x1
+ */
+func console_linflex_flush
+	cbz	x0, flush_error
+	ldr	x0, [x0, #CONSOLE_T_BASE]
+
+	b	console_linflex_core_flush
+flush_error:
+	mov	x0, #-EINVAL
+	ret
+endfunc console_linflex_flush
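For reference, a C sketch of the polling logic that ``console_linflex_core_flush`` implements above, using the same register offsets; it is illustrative only and not part of the driver:

.. code:: c

   /* Illustrative C equivalent of the assembly flush loop above. */
   #include <stdint.h>

   #define LINFLEX_LINSR          0x8U
   #define LINSR_LINS_MASK        0x0000F000U
   #define LINSR_LINS_RX_TX_MODE  0x00008000U

   int linflex_core_flush(uintptr_t baseaddr)
   {
       volatile uint32_t *linsr = (volatile uint32_t *)(baseaddr + LINFLEX_LINSR);

       /* Spin while the LINS state field still reports an ongoing RX/TX. */
       while ((*linsr & LINSR_LINS_MASK) == LINSR_LINS_RX_TX_MODE) {
       }

       return 0;
   }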
diff --git a/drivers/st/ddr/stm32mp2_ddr_helpers.c b/drivers/st/ddr/stm32mp2_ddr_helpers.c
new file mode 100644
index 0000000..e6be9dd
--- /dev/null
+++ b/drivers/st/ddr/stm32mp2_ddr_helpers.c
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2024, STMicroelectronics - All Rights Reserved
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+
+#include <platform_def.h>
+
+void ddr_sub_system_clk_init(void)
+{
+	mmio_write_32(stm32mp_rcc_base() + RCC_DDRCPCFGR,
+		      RCC_DDRCPCFGR_DDRCPEN | RCC_DDRCPCFGR_DDRCPLPEN);
+}
diff --git a/fdts/stm32mp25-bl2.dtsi b/fdts/stm32mp25-bl2.dtsi
index 438a58c..769aab2 100644
--- a/fdts/stm32mp25-bl2.dtsi
+++ b/fdts/stm32mp25-bl2.dtsi
@@ -1,4 +1,35 @@
 // SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
 /*
- * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ * Copyright (C) 2023-2024, STMicroelectronics - All Rights Reserved
  */
+
+/ {
+	soc@0 {
+#if !STM32MP_EMMC && !STM32MP_SDMMC
+		rifsc@42080000 {
+			/delete-node/ mmc@48220000;
+			/delete-node/ mmc@48230000;
+		};
+#endif
+	};
+
+	/*
+	 * UUIDs here are RFC 4122 compliant, meaning fields are stored in
+	 * network order (big endian).
+	 */
+
+	st-io_policies {
+		fip-handles {
+			compatible = "st,io-fip-handle";
+			fw_cfg_uuid = "5807e16a-8459-47be-8ed5-648e8dddab0e";
+			bl31_uuid = "47d4086d-4cfe-9846-9b95-2950cbbd5a00";
+			bl32_uuid = "05d0e189-53dc-1347-8d2b-500a4b7a3e38";
+			bl32_extra1_uuid = "0b70c29b-2a5a-7840-9f65-0a5682738288";
+			bl32_extra2_uuid = "8ea87bb1-cfa2-3f4d-85fd-e7bba50220d9";
+			bl33_uuid = "d6d0eea7-fcea-d54b-9782-9934f234b6e4";
+			hw_cfg_uuid = "08b8f1d9-c9cf-9349-a962-6fbc6b7265cc";
+			tos_fw_cfg_uuid = "26257c1a-dbc6-7f47-8d96-c4c4b0248021";
+			nt_fw_cfg_uuid = "28da9815-93e8-7e44-ac66-1aaf801550f9";
+		};
+	};
+};
diff --git a/fdts/stm32mp25-fw-config.dtsi b/fdts/stm32mp25-fw-config.dtsi
new file mode 100644
index 0000000..b187ad2
--- /dev/null
+++ b/fdts/stm32mp25-fw-config.dtsi
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (c) 2024, STMicroelectronics - All Rights Reserved
+ */
+
+#include <common/tbbr/tbbr_img_def.h>
+
+#include <platform_def.h>
+
+/dts-v1/;
+
+/ {
+	dtb-registry {
+		compatible = "fconf,dyn_cfg-dtb_registry";
+
+		hw-config {
+			load-address = <0x0 STM32MP_HW_CONFIG_BASE>;
+			max-size = <STM32MP_HW_CONFIG_MAX_SIZE>;
+			id = <HW_CONFIG_ID>;
+		};
+
+		nt_fw {
+			load-address = <0x0 STM32MP_BL33_BASE>;
+			max-size = <STM32MP_BL33_MAX_SIZE>;
+			id = <BL33_IMAGE_ID>;
+		};
+
+		tos_fw {
+			id = <BL32_IMAGE_ID>;
+		};
+	};
+};
diff --git a/fdts/stm32mp25-pinctrl.dtsi b/fdts/stm32mp25-pinctrl.dtsi
index 05876a3..fb12808 100644
--- a/fdts/stm32mp25-pinctrl.dtsi
+++ b/fdts/stm32mp25-pinctrl.dtsi
@@ -1,12 +1,65 @@
 // SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
 /*
- * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ * Copyright (C) 2023-2024, STMicroelectronics - All Rights Reserved
  * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
  */
 #include <dt-bindings/pinctrl/stm32-pinfunc.h>
 
 &pinctrl {
 	/omit-if-no-ref/
+	sdmmc1_b4_pins_a: sdmmc1-b4-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('E', 4, AF10)>, /* SDMMC1_D0 */
+				 <STM32_PINMUX('E', 5, AF10)>, /* SDMMC1_D1 */
+				 <STM32_PINMUX('E', 0, AF10)>, /* SDMMC1_D2 */
+				 <STM32_PINMUX('E', 1, AF10)>, /* SDMMC1_D3 */
+				 <STM32_PINMUX('E', 2, AF10)>; /* SDMMC1_CMD */
+			slew-rate = <2>;
+			drive-push-pull;
+			bias-disable;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('E', 3, AF10)>; /* SDMMC1_CK */
+			slew-rate = <3>;
+			drive-push-pull;
+			bias-disable;
+		};
+	};
+
+	/omit-if-no-ref/
+	sdmmc2_b4_pins_a: sdmmc2-b4-0 {
+		pins1 {
+			pinmux = <STM32_PINMUX('E', 13, AF12)>, /* SDMMC2_D0 */
+				 <STM32_PINMUX('E', 11, AF12)>, /* SDMMC2_D1 */
+				 <STM32_PINMUX('E', 8, AF12)>, /* SDMMC2_D2 */
+				 <STM32_PINMUX('E', 12, AF12)>, /* SDMMC2_D3 */
+				 <STM32_PINMUX('E', 15, AF12)>; /* SDMMC2_CMD */
+			slew-rate = <2>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+		pins2 {
+			pinmux = <STM32_PINMUX('E', 14, AF12)>; /* SDMMC2_CK */
+			slew-rate = <3>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+	};
+
+	/omit-if-no-ref/
+	sdmmc2_d47_pins_a: sdmmc2-d47-0 {
+		pins {
+			pinmux = <STM32_PINMUX('E', 10, AF12)>, /* SDMMC2_D4 */
+				 <STM32_PINMUX('E', 9, AF12)>, /* SDMMC2_D5 */
+				 <STM32_PINMUX('E', 6, AF12)>, /* SDMMC2_D6 */
+				 <STM32_PINMUX('E', 7, AF12)>; /* SDMMC2_D7 */
+			slew-rate = <2>;
+			drive-push-pull;
+			bias-pull-up;
+		};
+	};
+
+	/omit-if-no-ref/
 	usart2_pins_a: usart2-0 {
 		pins1 {
 			pinmux = <STM32_PINMUX('A', 4, AF6)>; /* USART2_TX */
diff --git a/fdts/stm32mp251.dtsi b/fdts/stm32mp251.dtsi
index 6e262bb..6f39b5a 100644
--- a/fdts/stm32mp251.dtsi
+++ b/fdts/stm32mp251.dtsi
@@ -97,6 +97,32 @@
 				resets = <&rcc USART2_R>;
 				status = "disabled";
 			};
+
+			sdmmc1: mmc@48220000 {
+				compatible = "st,stm32mp25-sdmmc2", "arm,pl18x", "arm,primecell";
+				arm,primecell-periphid = <0x00353180>;
+				reg = <0x48220000 0x400>, <0x44230400 0x8>;
+				clocks = <&rcc CK_KER_SDMMC1>;
+				clock-names = "apb_pclk";
+				resets = <&rcc SDMMC1_R>;
+				cap-sd-highspeed;
+				cap-mmc-highspeed;
+				max-frequency = <120000000>;
+				status = "disabled";
+			};
+
+			sdmmc2: mmc@48230000 {
+				compatible = "st,stm32mp25-sdmmc2", "arm,pl18x", "arm,primecell";
+				arm,primecell-periphid = <0x00353180>;
+				reg = <0x48230000 0x400>, <0x44230800 0x8>;
+				clocks = <&rcc CK_KER_SDMMC2>;
+				clock-names = "apb_pclk";
+				resets = <&rcc SDMMC2_R>;
+				cap-sd-highspeed;
+				cap-mmc-highspeed;
+				max-frequency = <120000000>;
+				status = "disabled";
+			};
 		};
 
 		bsec: efuse@44000000 {
@@ -120,6 +146,9 @@
 			nand2_otp: otp20@50 {
 				reg = <0x50 0x4>;
 			};
+			rev_otp@198 {
+				reg = <0x198 0x4>;
+			};
 			package_otp: package-otp@1e8 {
 				reg = <0x1e8 0x1>;
 			};
@@ -176,7 +205,6 @@
 			#size-cells = <1>;
 			compatible = "st,stm32mp257-pinctrl";
 			ranges = <0 0x44240000 0xa0400>;
-			pins-are-numbered;
 
 			gpioa: gpio@44240000 {
 				gpio-controller;
@@ -305,7 +333,6 @@
 			#size-cells = <1>;
 			compatible = "st,stm32mp257-z-pinctrl";
 			ranges = <0 0x46200000 0x400>;
-			pins-are-numbered;
 
 			gpioz: gpio@46200000 {
 				gpio-controller;
diff --git a/fdts/stm32mp257f-ev1-ca35tdcid-fw-config.dtsi b/fdts/stm32mp257f-ev1-ca35tdcid-fw-config.dtsi
new file mode 100644
index 0000000..e41c6b9
--- /dev/null
+++ b/fdts/stm32mp257f-ev1-ca35tdcid-fw-config.dtsi
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2024 - All Rights Reserved
+ */
+
+/*
+ * STM32MP25 tf-a firmware config
+ * Project : open
+ * Generated by XLmx tool version 2.2 - 2/27/2024 11:46:17 AM
+ */
+
+/ {
+	dtb-registry {
+		tos_fw {
+			load-address = <0x0 0x82000000>;
+			max-size = <0x2000000>;
+		};
+	};
+};
diff --git a/fdts/stm32mp257f-ev1-ca35tdcid-rcc.dtsi b/fdts/stm32mp257f-ev1-ca35tdcid-rcc.dtsi
new file mode 100644
index 0000000..3e84df5
--- /dev/null
+++ b/fdts/stm32mp257f-ev1-ca35tdcid-rcc.dtsi
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2024 - All Rights Reserved
+ * Author: Loic Pallardy loic.pallardy@foss.st.com for STMicroelectronics.
+ */
+
+/*
+ * STM32MP25 Clock tree device tree configuration
+ * Project : open
+ * Generated by XLmx tool version 2.2 - 2/27/2024 11:46:16 AM
+ */
+
+&clk_hse {
+	clock-frequency = <40000000>;
+};
+
+&clk_hsi {
+	clock-frequency = <64000000>;
+};
+
+&clk_lse {
+	clock-frequency = <32768>;
+};
+
+&clk_lsi {
+	clock-frequency = <32000>;
+};
+
+&clk_msi {
+	clock-frequency = <16000000>;
+};
+
+&rcc {
+	st,busclk = <
+		DIV_CFG(DIV_LSMCU, 1)
+		DIV_CFG(DIV_APB1, 0)
+		DIV_CFG(DIV_APB2, 0)
+		DIV_CFG(DIV_APB3, 0)
+		DIV_CFG(DIV_APB4, 0)
+		DIV_CFG(DIV_APBDBG, 0)
+	>;
+
+	st,flexgen = <
+		FLEXGEN_CFG(0, XBAR_SRC_PLL4, 0, 2)
+		FLEXGEN_CFG(1, XBAR_SRC_PLL4, 0, 5)
+		FLEXGEN_CFG(2, XBAR_SRC_PLL4, 0, 1)
+		FLEXGEN_CFG(4, XBAR_SRC_PLL4, 0, 3)
+		FLEXGEN_CFG(5, XBAR_SRC_PLL4, 0, 2)
+		FLEXGEN_CFG(8, XBAR_SRC_HSI_KER, 0, 0)
+		FLEXGEN_CFG(48, XBAR_SRC_PLL5, 0, 3)
+		FLEXGEN_CFG(51, XBAR_SRC_PLL4, 0, 5)
+		FLEXGEN_CFG(52, XBAR_SRC_PLL4, 0, 5)
+		FLEXGEN_CFG(58, XBAR_SRC_HSE, 0, 1)
+		FLEXGEN_CFG(63, XBAR_SRC_PLL4, 0, 2)
+	>;
+
+	st,kerclk = <
+		MUX_CFG(MUX_USB2PHY1, MUX_USB2PHY1_FLEX57)
+		MUX_CFG(MUX_USB2PHY2, MUX_USB2PHY2_FLEX58)
+	>;
+
+	pll1: st,pll-1 {
+		st,pll = <&pll1_cfg_1200Mhz>;
+
+		pll1_cfg_1200Mhz: pll1-cfg-1200Mhz {
+			cfg = <30 1 1 1>;
+			src = <MUX_CFG(MUX_MUXSEL5, MUXSEL_HSE)>;
+		};
+	};
+
+	pll2: st,pll-2 {
+		st,pll = <&pll2_cfg_600Mhz>;
+
+		pll2_cfg_600Mhz: pll2-cfg-600Mhz {
+			cfg = <30 1 1 2>;
+			src = <MUX_CFG(MUX_MUXSEL6, MUXSEL_HSE)>;
+		};
+	};
+
+	pll4: st,pll-4 {
+		st,pll = <&pll4_cfg_1200Mhz>;
+
+		pll4_cfg_1200Mhz: pll4-cfg-1200Mhz {
+			cfg = <30 1 1 1>;
+			src = <MUX_CFG(MUX_MUXSEL0, MUXSEL_HSE)>;
+		};
+	};
+
+	pll5: st,pll-5 {
+		st,pll = <&pll5_cfg_532Mhz>;
+
+		pll5_cfg_532Mhz: pll5-cfg-532Mhz {
+			cfg = <133 5 1 2>;
+			src = <MUX_CFG(MUX_MUXSEL1, MUXSEL_HSE)>;
+		};
+	};
+};
diff --git a/fdts/stm32mp257f-ev1-fw-config.dts b/fdts/stm32mp257f-ev1-fw-config.dts
new file mode 100644
index 0000000..9424f49
--- /dev/null
+++ b/fdts/stm32mp257f-ev1-fw-config.dts
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (c) 2024, STMicroelectronics - All Rights Reserved
+ */
+
+#include "stm32mp25-fw-config.dtsi"
+#include "stm32mp257f-ev1-ca35tdcid-fw-config.dtsi"
diff --git a/fdts/stm32mp257f-ev1.dts b/fdts/stm32mp257f-ev1.dts
index 09e83d8..6df1b30 100644
--- a/fdts/stm32mp257f-ev1.dts
+++ b/fdts/stm32mp257f-ev1.dts
@@ -6,8 +6,10 @@
 
 /dts-v1/;
 
+#include <dt-bindings/clock/stm32mp25-clksrc.h>
 #include "stm32mp257.dtsi"
 #include "stm32mp25xf.dtsi"
+#include "stm32mp257f-ev1-ca35tdcid-rcc.dtsi"
 #include "stm32mp25-pinctrl.dtsi"
 #include "stm32mp25xxai-pinctrl.dtsi"
 
@@ -35,6 +37,25 @@
 	};
 };
 
+&sdmmc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdmmc1_b4_pins_a>;
+	st,neg-edge;
+	bus-width = <4>;
+	status = "okay";
+};
+
+&sdmmc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_a>;
+	non-removable;
+	no-sd;
+	no-sdio;
+	st,neg-edge;
+	bus-width = <8>;
+	status = "okay";
+};
+
 &usart2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&usart2_pins_a>;
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index 52ed2b9..d8ad881 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -24,6 +24,9 @@
 #define MIDR_PN_MASK		U(0xfff)
 #define MIDR_PN_SHIFT		U(0x4)
 
+/* Extracts the CPU part number from MIDR for checking a CPU match. */
+#define EXTRACT_PARTNUM(x)	(((x) >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+
 /*******************************************************************************
  * MPIDR macros
  ******************************************************************************/
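A hedged usage sketch for the new ``EXTRACT_PARTNUM()`` helper; the MIDR encoding and part number below are example values only:

.. code:: c

   /* Illustrative only: the MIDR value and part number are example values. */
   #include <stdint.h>

   #define MIDR_PN_MASK        0xfffU
   #define MIDR_PN_SHIFT       0x4U
   #define EXTRACT_PARTNUM(x)  (((x) >> MIDR_PN_SHIFT) & MIDR_PN_MASK)

   #define EXAMPLE_CPU_MIDR    0x410fd801U  /* hypothetical implementer/part/rev */

   /* Returns 1 when the given MIDR names the same part number. */
   int midr_matches_example_cpu(uint32_t midr)
   {
       return EXTRACT_PARTNUM(midr) == EXTRACT_PARTNUM(EXAMPLE_CPU_MIDR);
   }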
diff --git a/include/common/feat_detect.h b/include/common/feat_detect.h
index 788dfb3..b85e1ce 100644
--- a/include/common/feat_detect.h
+++ b/include/common/feat_detect.h
@@ -11,8 +11,9 @@
 void detect_arch_features(void);
 
 /* Macro Definitions */
-#define FEAT_STATE_DISABLED	0
-#define FEAT_STATE_ALWAYS	1
-#define FEAT_STATE_CHECK	2
+#define FEAT_STATE_DISABLED		0
+#define FEAT_STATE_ALWAYS		1
+#define FEAT_STATE_CHECK		2
+#define FEAT_STATE_CHECK_ASYMMETRIC	3
 
 #endif /* FEAT_DETECT_H */
diff --git a/include/drivers/nxp/clk/s32cc/s32cc-clk-ids.h b/include/drivers/nxp/clk/s32cc/s32cc-clk-ids.h
index b95cd32..de633ae 100644
--- a/include/drivers/nxp/clk/s32cc/s32cc-clk-ids.h
+++ b/include/drivers/nxp/clk/s32cc/s32cc-clk-ids.h
@@ -87,4 +87,12 @@
 #define S32CC_CLK_XBAR_DIV4			S32CC_ARCH_CLK(11)
 #define S32CC_CLK_XBAR_DIV6			S32CC_ARCH_CLK(12)
 
+/* Periph PLL */
+#define S32CC_CLK_PERIPH_PLL_MUX		S32CC_ARCH_CLK(13)
+#define S32CC_CLK_PERIPH_PLL_VCO		S32CC_ARCH_CLK(14)
+
+#define S32CC_CLK_MC_CGM0_MUX8			S32CC_ARCH_CLK(15)
+#define S32CC_CLK_LINFLEX_BAUD			S32CC_ARCH_CLK(16)
+#define S32CC_CLK_LINFLEX			S32CC_ARCH_CLK(17)
+
 #endif /* S32CC_CLK_IDS_H */
diff --git a/include/drivers/nxp/clk/s32cc/s32cc-clk-modules.h b/include/drivers/nxp/clk/s32cc/s32cc-clk-modules.h
index 703713b..a6d58cc 100644
--- a/include/drivers/nxp/clk/s32cc/s32cc-clk-modules.h
+++ b/include/drivers/nxp/clk/s32cc/s32cc-clk-modules.h
@@ -30,6 +30,7 @@
 	S32CC_SIRC,
 	S32CC_ARM_PLL,
 	S32CC_ARM_DFS,
+	S32CC_PERIPH_PLL,
 	S32CC_CGM0,
 	S32CC_CGM1,
 };
diff --git a/include/drivers/st/stm32mp2_ddr_helpers.h b/include/drivers/st/stm32mp2_ddr_helpers.h
new file mode 100644
index 0000000..069fb83
--- /dev/null
+++ b/include/drivers/st/stm32mp2_ddr_helpers.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2024, STMicroelectronics - All Rights Reserved
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STM32MP2_DDR_HELPERS_H
+#define STM32MP2_DDR_HELPERS_H
+
+void ddr_sub_system_clk_init(void);
+
+#endif /* STM32MP2_DDR_HELPERS_H */
diff --git a/include/drivers/st/stm32mp2_pwr.h b/include/drivers/st/stm32mp2_pwr.h
new file mode 100644
index 0000000..356399a
--- /dev/null
+++ b/include/drivers/st/stm32mp2_pwr.h
@@ -0,0 +1,478 @@
+/*
+ * Copyright (c) 2018-2024, STMicroelectronics - All Rights Reserved
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STM32MP2_PWR_H
+#define STM32MP2_PWR_H
+
+#include <lib/utils_def.h>
+
+#define PWR_CR1					U(0x00)
+#define PWR_CR2					U(0x04)
+#define PWR_CR3					U(0x08)
+#define PWR_CR4					U(0x0C)
+#define PWR_CR5					U(0x10)
+#define PWR_CR6					U(0x14)
+#define PWR_CR7					U(0x18)
+#define PWR_CR8					U(0x1C)
+#define PWR_CR9					U(0x20)
+#define PWR_CR10				U(0x24)
+#define PWR_CR11				U(0x28)
+#define PWR_CR12				U(0x2C)
+#define PWR_UCPDR				U(0x30)
+#define PWR_BDCR1				U(0x38)
+#define PWR_BDCR2				U(0x3C)
+#define PWR_CPU1CR				U(0x40)
+#define PWR_CPU2CR				U(0x44)
+#define PWR_CPU3CR				U(0x48)
+#define PWR_D1CR				U(0x4C)
+#define PWR_D2CR				U(0x50)
+#define PWR_D3CR				U(0x54)
+#define PWR_WKUPCR1				U(0x60)
+#define PWR_WKUPCR2				U(0x64)
+#define PWR_WKUPCR3				U(0x68)
+#define PWR_WKUPCR4				U(0x6C)
+#define PWR_WKUPCR5				U(0x70)
+#define PWR_WKUPCR6				U(0x74)
+#define PWR_D3WKUPENR				U(0x98)
+#define PWR_RSECCFGR				U(0x100)
+#define PWR_RPRIVCFGR				U(0x104)
+#define PWR_R0CIDCFGR				U(0x108)
+#define PWR_R1CIDCFGR				U(0x10C)
+#define PWR_R2CIDCFGR				U(0x110)
+#define PWR_R3CIDCFGR				U(0x114)
+#define PWR_R4CIDCFGR				U(0x118)
+#define PWR_R5CIDCFGR				U(0x11C)
+#define PWR_R6CIDCFGR				U(0x120)
+#define PWR_WIOSECCFGR				U(0x180)
+#define PWR_WIOPRIVCFGR				U(0x184)
+#define PWR_WIO1CIDCFGR				U(0x188)
+#define PWR_WIO1SEMCR				U(0x18C)
+#define PWR_WIO2CIDCFGR				U(0x190)
+#define PWR_WIO2SEMCR				U(0x194)
+#define PWR_WIO3CIDCFGR				U(0x198)
+#define PWR_WIO3SEMCR				U(0x19C)
+#define PWR_WIO4CIDCFGR				U(0x1A0)
+#define PWR_WIO4SEMCR				U(0x1A4)
+#define PWR_WIO5CIDCFGR				U(0x1A8)
+#define PWR_WIO5SEMCR				U(0x1AC)
+#define PWR_WIO6CIDCFGR				U(0x1B0)
+#define PWR_WIO6SEMCR				U(0x1B4)
+#define PWR_CPU1D1SR				U(0x200)
+#define PWR_CPU2D2SR				U(0x204)
+#define PWR_CPU3D3SR				U(0x208)
+#define PWR_DBGR				U(0x308)
+#define PWR_VERR				U(0x3F4)
+#define PWR_IPIDR				U(0x3F8)
+#define PWR_SIDR				U(0x3FC)
+
+/* PWR_CR1 register fields */
+#define PWR_CR1_VDDIO3VMEN			BIT_32(0)
+#define PWR_CR1_VDDIO4VMEN			BIT_32(1)
+#define PWR_CR1_USB33VMEN			BIT_32(2)
+#define PWR_CR1_UCPDVMEN			BIT_32(3)
+#define PWR_CR1_AVMEN				BIT_32(4)
+#define PWR_CR1_VDDIO3SV			BIT_32(8)
+#define PWR_CR1_VDDIO4SV			BIT_32(9)
+#define PWR_CR1_USB33SV				BIT_32(10)
+#define PWR_CR1_UCPDSV				BIT_32(11)
+#define PWR_CR1_ASV				BIT_32(12)
+#define PWR_CR1_VDDIO3RDY			BIT_32(16)
+#define PWR_CR1_VDDIO4RDY			BIT_32(17)
+#define PWR_CR1_USB33RDY			BIT_32(18)
+#define PWR_CR1_UCPDRDY				BIT_32(19)
+#define PWR_CR1_ARDY				BIT_32(20)
+#define PWR_CR1_VDDIOVRSEL			BIT_32(24)
+#define PWR_CR1_VDDIO3VRSEL			BIT_32(25)
+#define PWR_CR1_VDDIO4VRSEL			BIT_32(26)
+#define PWR_CR1_GPVMO				BIT_32(31)
+
+/* PWR_CR2 register fields */
+#define PWR_CR2_MONEN				BIT_32(0)
+#define PWR_CR2_VBATL				BIT_32(8)
+#define PWR_CR2_VBATH				BIT_32(9)
+#define PWR_CR2_TEMPL				BIT_32(10)
+#define PWR_CR2_TEMPH				BIT_32(11)
+
+/* PWR_CR3 register fields */
+#define PWR_CR3_PVDEN				BIT_32(0)
+#define PWR_CR3_PVDO				BIT_32(8)
+
+/* PWR_CR5 register fields */
+#define PWR_CR5_VCOREMONEN			BIT_32(0)
+#define PWR_CR5_VCOREL				BIT_32(8)
+#define PWR_CR5_VCOREH				BIT_32(9)
+
+/* PWR_CR6 register fields */
+#define PWR_CR6_VCPUMONEN			BIT_32(0)
+#define PWR_CR6_VCPULLS				BIT_32(4)
+#define PWR_CR6_VCPUL				BIT_32(8)
+#define PWR_CR6_VCPUH				BIT_32(9)
+
+/* PWR_CR7 register fields */
+#define PWR_CR7_VDDIO2VMEN			BIT_32(0)
+#define PWR_CR7_VDDIO2SV			BIT_32(8)
+#define PWR_CR7_VDDIO2RDY			BIT_32(16)
+#define PWR_CR7_VDDIO2VRSEL			BIT_32(24)
+#define PWR_CR7_VDDIO2VRSTBY			BIT_32(25)
+
+/* PWR_CR8 register fields */
+#define PWR_CR8_VDDIO1VMEN			BIT_32(0)
+#define PWR_CR8_VDDIO1SV			BIT_32(8)
+#define PWR_CR8_VDDIO1RDY			BIT_32(16)
+#define PWR_CR8_VDDIO1VRSEL			BIT_32(24)
+#define PWR_CR8_VDDIO1VRSTBY			BIT_32(25)
+
+/* PWR_CR9 register fields */
+#define PWR_CR9_BKPRBSEN			BIT_32(0)
+#define PWR_CR9_LPR1BSEN			BIT_32(4)
+
+/* PWR_CR10 register fields */
+#define PWR_CR10_RETRBSEN_MASK			GENMASK_32(1, 0)
+#define PWR_CR10_RETRBSEN_SHIFT			U(0)
+
+/* PWR_CR11 register fields */
+#define PWR_CR11_DDRRETDIS			BIT_32(0)
+
+/* PWR_CR12 register fields */
+#define PWR_CR12_GPUVMEN			BIT_32(0)
+#define PWR_CR12_GPULVTEN			BIT_32(1)
+#define PWR_CR12_GPUSV				BIT_32(8)
+#define PWR_CR12_VDDGPURDY			BIT_32(16)
+
+/* PWR_UCPDR register fields */
+#define PWR_UCPDR_UCPD_DBDIS			BIT_32(0)
+#define PWR_UCPDR_UCPD_STBY			BIT_32(1)
+
+/* PWR_BDCR1 register fields */
+#define PWR_BDCR1_DBD3P				BIT_32(0)
+
+/* PWR_BDCR2 register fields */
+#define PWR_BDCR2_DBP				BIT_32(0)
+
+/* PWR_CPU1CR register fields */
+#define PWR_CPU1CR_PDDS_D2			BIT_32(0)
+#define PWR_CPU1CR_PDDS_D1			BIT_32(1)
+#define PWR_CPU1CR_VBF				BIT_32(4)
+#define PWR_CPU1CR_STOPF			BIT_32(5)
+#define PWR_CPU1CR_SBF				BIT_32(6)
+#define PWR_CPU1CR_SBF_D1			BIT_32(7)
+#define PWR_CPU1CR_SBF_D3			BIT_32(8)
+#define PWR_CPU1CR_CSSF				BIT_32(9)
+#define PWR_CPU1CR_STANDBYWFIL2			BIT_32(15)
+#define PWR_CPU1CR_LPDS_D1			BIT_32(16)
+#define PWR_CPU1CR_LVDS_D1			BIT_32(17)
+
+/* PWR_CPU2CR register fields */
+#define PWR_CPU2CR_PDDS_D2			BIT_32(0)
+#define PWR_CPU2CR_VBF				BIT_32(4)
+#define PWR_CPU2CR_STOPF			BIT_32(5)
+#define PWR_CPU2CR_SBF				BIT_32(6)
+#define PWR_CPU2CR_SBF_D2			BIT_32(7)
+#define PWR_CPU2CR_SBF_D3			BIT_32(8)
+#define PWR_CPU2CR_CSSF				BIT_32(9)
+#define PWR_CPU2CR_DEEPSLEEP			BIT_32(15)
+#define PWR_CPU2CR_LPDS_D2			BIT_32(16)
+#define PWR_CPU2CR_LVDS_D2			BIT_32(17)
+
+/* PWR_CPU3CR register fields */
+#define PWR_CPU3CR_VBF				BIT_32(4)
+#define PWR_CPU3CR_SBF_D3			BIT_32(8)
+#define PWR_CPU3CR_CSSF				BIT_32(9)
+#define PWR_CPU3CR_DEEPSLEEP			BIT_32(15)
+
+/* PWR_D1CR register fields */
+#define PWR_D1CR_LPCFG_D1			BIT_32(0)
+#define PWR_D1CR_POPL_D1_MASK			GENMASK_32(12, 8)
+#define PWR_D1CR_POPL_D1_SHIFT			U(8)
+
+/* PWR_D2CR register fields */
+#define PWR_D2CR_LPCFG_D2			BIT_32(0)
+#define PWR_D2CR_POPL_D2_MASK			GENMASK_32(12, 8)
+#define PWR_D2CR_POPL_D2_SHIFT			U(8)
+#define PWR_D2CR_LPLVDLY_D2_MASK		GENMASK_32(18, 16)
+#define PWR_D2CR_LPLVDLY_D2_SHIFT		U(16)
+#define PWR_D2CR_PODH_D2_MASK			GENMASK_32(27, 24)
+#define PWR_D2CR_PODH_D2_SHIFT			U(24)
+
+/* PWR_D3CR register fields */
+#define PWR_D3CR_PDDS_D3			BIT_32(0)
+#define PWR_D3CR_D3RDY				BIT_32(31)
+
+/* PWR_WKUPCR1 register fields */
+#define PWR_WKUPCR1_WKUPC			BIT_32(0)
+#define PWR_WKUPCR1_WKUPP			BIT_32(8)
+#define PWR_WKUPCR1_WKUPPUPD_MASK		GENMASK_32(13, 12)
+#define PWR_WKUPCR1_WKUPPUPD_SHIFT		U(12)
+#define PWR_WKUPCR1_WKUPENCPU1			BIT_32(16)
+#define PWR_WKUPCR1_WKUPENCPU2			BIT_32(17)
+#define PWR_WKUPCR1_WKUPF			BIT_32(31)
+
+/* PWR_WKUPCR2 register fields */
+#define PWR_WKUPCR2_WKUPC			BIT_32(0)
+#define PWR_WKUPCR2_WKUPP			BIT_32(8)
+#define PWR_WKUPCR2_WKUPPUPD_MASK		GENMASK_32(13, 12)
+#define PWR_WKUPCR2_WKUPPUPD_SHIFT		U(12)
+#define PWR_WKUPCR2_WKUPENCPU1			BIT_32(16)
+#define PWR_WKUPCR2_WKUPENCPU2			BIT_32(17)
+#define PWR_WKUPCR2_WKUPF			BIT_32(31)
+
+/* PWR_WKUPCR3 register fields */
+#define PWR_WKUPCR3_WKUPC			BIT_32(0)
+#define PWR_WKUPCR3_WKUPP			BIT_32(8)
+#define PWR_WKUPCR3_WKUPPUPD_MASK		GENMASK_32(13, 12)
+#define PWR_WKUPCR3_WKUPPUPD_SHIFT		U(12)
+#define PWR_WKUPCR3_WKUPENCPU1			BIT_32(16)
+#define PWR_WKUPCR3_WKUPENCPU2			BIT_32(17)
+#define PWR_WKUPCR3_WKUPF			BIT_32(31)
+
+/* PWR_WKUPCR4 register fields */
+#define PWR_WKUPCR4_WKUPC			BIT_32(0)
+#define PWR_WKUPCR4_WKUPP			BIT_32(8)
+#define PWR_WKUPCR4_WKUPPUPD_MASK		GENMASK_32(13, 12)
+#define PWR_WKUPCR4_WKUPPUPD_SHIFT		U(12)
+#define PWR_WKUPCR4_WKUPENCPU1			BIT_32(16)
+#define PWR_WKUPCR4_WKUPENCPU2			BIT_32(17)
+#define PWR_WKUPCR4_WKUPF			BIT_32(31)
+
+/* PWR_WKUPCR5 register fields */
+#define PWR_WKUPCR5_WKUPC			BIT_32(0)
+#define PWR_WKUPCR5_WKUPP			BIT_32(8)
+#define PWR_WKUPCR5_WKUPPUPD_MASK		GENMASK_32(13, 12)
+#define PWR_WKUPCR5_WKUPPUPD_SHIFT		U(12)
+#define PWR_WKUPCR5_WKUPENCPU1			BIT_32(16)
+#define PWR_WKUPCR5_WKUPENCPU2			BIT_32(17)
+#define PWR_WKUPCR5_WKUPF			BIT_32(31)
+
+/* PWR_WKUPCR6 register fields */
+#define PWR_WKUPCR6_WKUPC			BIT_32(0)
+#define PWR_WKUPCR6_WKUPP			BIT_32(8)
+#define PWR_WKUPCR6_WKUPPUPD_MASK		GENMASK_32(13, 12)
+#define PWR_WKUPCR6_WKUPPUPD_SHIFT		U(12)
+#define PWR_WKUPCR6_WKUPENCPU1			BIT_32(16)
+#define PWR_WKUPCR6_WKUPENCPU2			BIT_32(17)
+#define PWR_WKUPCR6_WKUPF			BIT_32(31)
+
+/* PWR_D3WKUPENR register fields */
+#define PWR_D3WKUPENR_TAMP_WKUPEN_D3		BIT_32(0)
+
+/* PWR_RSECCFGR register fields */
+#define PWR_RSECCFGR_RSEC0			BIT_32(0)
+#define PWR_RSECCFGR_RSEC1			BIT_32(1)
+#define PWR_RSECCFGR_RSEC2			BIT_32(2)
+#define PWR_RSECCFGR_RSEC3			BIT_32(3)
+#define PWR_RSECCFGR_RSEC4			BIT_32(4)
+#define PWR_RSECCFGR_RSEC5			BIT_32(5)
+#define PWR_RSECCFGR_RSEC6			BIT_32(6)
+
+/* PWR_RPRIVCFGR register fields */
+#define PWR_RPRIVCFGR_RPRIV0			BIT_32(0)
+#define PWR_RPRIVCFGR_RPRIV1			BIT_32(1)
+#define PWR_RPRIVCFGR_RPRIV2			BIT_32(2)
+#define PWR_RPRIVCFGR_RPRIV3			BIT_32(3)
+#define PWR_RPRIVCFGR_RPRIV4			BIT_32(4)
+#define PWR_RPRIVCFGR_RPRIV5			BIT_32(5)
+#define PWR_RPRIVCFGR_RPRIV6			BIT_32(6)
+
+/* PWR_R0CIDCFGR register fields */
+#define PWR_R0CIDCFGR_CFEN			BIT_32(0)
+#define PWR_R0CIDCFGR_SCID_MASK			GENMASK_32(6, 4)
+#define PWR_R0CIDCFGR_SCID_SHIFT		U(4)
+
+/* PWR_R1CIDCFGR register fields */
+#define PWR_R1CIDCFGR_CFEN			BIT_32(0)
+#define PWR_R1CIDCFGR_SCID_MASK			GENMASK_32(6, 4)
+#define PWR_R1CIDCFGR_SCID_SHIFT		U(4)
+
+/* PWR_R2CIDCFGR register fields */
+#define PWR_R2CIDCFGR_CFEN			BIT_32(0)
+#define PWR_R2CIDCFGR_SCID_MASK			GENMASK_32(6, 4)
+#define PWR_R2CIDCFGR_SCID_SHIFT		U(4)
+
+/* PWR_R3CIDCFGR register fields */
+#define PWR_R3CIDCFGR_CFEN			BIT_32(0)
+#define PWR_R3CIDCFGR_SCID_MASK			GENMASK_32(6, 4)
+#define PWR_R3CIDCFGR_SCID_SHIFT		U(4)
+
+/* PWR_R4CIDCFGR register fields */
+#define PWR_R4CIDCFGR_CFEN			BIT_32(0)
+#define PWR_R4CIDCFGR_SCID_MASK			GENMASK_32(6, 4)
+#define PWR_R4CIDCFGR_SCID_SHIFT		U(4)
+
+/* PWR_R5CIDCFGR register fields */
+#define PWR_R5CIDCFGR_CFEN			BIT_32(0)
+#define PWR_R5CIDCFGR_SCID_MASK			GENMASK_32(6, 4)
+#define PWR_R5CIDCFGR_SCID_SHIFT		U(4)
+
+/* PWR_R6CIDCFGR register fields */
+#define PWR_R6CIDCFGR_CFEN			BIT_32(0)
+#define PWR_R6CIDCFGR_SCID_MASK			GENMASK_32(6, 4)
+#define PWR_R6CIDCFGR_SCID_SHIFT		U(4)
+
+/* PWR_WIOSECCFGR register fields */
+#define PWR_WIOSECCFGR_WIOSEC1			BIT_32(0)
+#define PWR_WIOSECCFGR_WIOSEC2			BIT_32(1)
+#define PWR_WIOSECCFGR_WIOSEC3			BIT_32(2)
+#define PWR_WIOSECCFGR_WIOSEC4			BIT_32(3)
+#define PWR_WIOSECCFGR_WIOSEC5			BIT_32(4)
+#define PWR_WIOSECCFGR_WIOSEC6			BIT_32(5)
+
+/* PWR_WIOPRIVCFGR register fields */
+#define PWR_WIOPRIVCFGR_WIOPRIV1		BIT_32(0)
+#define PWR_WIOPRIVCFGR_WIOPRIV2		BIT_32(1)
+#define PWR_WIOPRIVCFGR_WIOPRIV3		BIT_32(2)
+#define PWR_WIOPRIVCFGR_WIOPRIV4		BIT_32(3)
+#define PWR_WIOPRIVCFGR_WIOPRIV5		BIT_32(4)
+#define PWR_WIOPRIVCFGR_WIOPRIV6		BIT_32(5)
+
+/* PWR_WIO1CIDCFGR register fields */
+#define PWR_WIO1CIDCFGR_CFEN			BIT_32(0)
+#define PWR_WIO1CIDCFGR_SEM_EN			BIT_32(1)
+#define PWR_WIO1CIDCFGR_SCID_MASK		GENMASK_32(6, 4)
+#define PWR_WIO1CIDCFGR_SCID_SHIFT		U(4)
+#define PWR_WIO1CIDCFGR_SEMWLC0			BIT_32(16)
+#define PWR_WIO1CIDCFGR_SEMWLC1			BIT_32(17)
+#define PWR_WIO1CIDCFGR_SEMWLC2			BIT_32(18)
+#define PWR_WIO1CIDCFGR_SEMWLC3			BIT_32(19)
+#define PWR_WIO1CIDCFGR_SEMWLC4			BIT_32(20)
+#define PWR_WIO1CIDCFGR_SEMWLC5			BIT_32(21)
+#define PWR_WIO1CIDCFGR_SEMWLC6			BIT_32(22)
+#define PWR_WIO1CIDCFGR_SEMWLC7			BIT_32(23)
+
+/* PWR_WIO1SEMCR register fields */
+#define PWR_WIO1SEMCR_SEM_MUTEX			BIT_32(0)
+#define PWR_WIO1SEMCR_SEMCID_MASK		GENMASK_32(6, 4)
+#define PWR_WIO1SEMCR_SEMCID_SHIFT		U(4)
+
+/* PWR_WIO2CIDCFGR register fields */
+#define PWR_WIO2CIDCFGR_CFEN			BIT_32(0)
+#define PWR_WIO2CIDCFGR_SEM_EN			BIT_32(1)
+#define PWR_WIO2CIDCFGR_SCID_MASK		GENMASK_32(6, 4)
+#define PWR_WIO2CIDCFGR_SCID_SHIFT		U(4)
+#define PWR_WIO2CIDCFGR_SEMWLC0			BIT_32(16)
+#define PWR_WIO2CIDCFGR_SEMWLC1			BIT_32(17)
+#define PWR_WIO2CIDCFGR_SEMWLC2			BIT_32(18)
+#define PWR_WIO2CIDCFGR_SEMWLC3			BIT_32(19)
+#define PWR_WIO2CIDCFGR_SEMWLC4			BIT_32(20)
+#define PWR_WIO2CIDCFGR_SEMWLC5			BIT_32(21)
+#define PWR_WIO2CIDCFGR_SEMWLC6			BIT_32(22)
+#define PWR_WIO2CIDCFGR_SEMWLC7			BIT_32(23)
+
+/* PWR_WIO2SEMCR register fields */
+#define PWR_WIO2SEMCR_SEM_MUTEX			BIT_32(0)
+#define PWR_WIO2SEMCR_SEMCID_MASK		GENMASK_32(6, 4)
+#define PWR_WIO2SEMCR_SEMCID_SHIFT		U(4)
+
+/* PWR_WIO3CIDCFGR register fields */
+#define PWR_WIO3CIDCFGR_CFEN			BIT_32(0)
+#define PWR_WIO3CIDCFGR_SEM_EN			BIT_32(1)
+#define PWR_WIO3CIDCFGR_SCID_MASK		GENMASK_32(6, 4)
+#define PWR_WIO3CIDCFGR_SCID_SHIFT		U(4)
+#define PWR_WIO3CIDCFGR_SEMWLC0			BIT_32(16)
+#define PWR_WIO3CIDCFGR_SEMWLC1			BIT_32(17)
+#define PWR_WIO3CIDCFGR_SEMWLC2			BIT_32(18)
+#define PWR_WIO3CIDCFGR_SEMWLC3			BIT_32(19)
+#define PWR_WIO3CIDCFGR_SEMWLC4			BIT_32(20)
+#define PWR_WIO3CIDCFGR_SEMWLC5			BIT_32(21)
+#define PWR_WIO3CIDCFGR_SEMWLC6			BIT_32(22)
+#define PWR_WIO3CIDCFGR_SEMWLC7			BIT_32(23)
+
+/* PWR_WIO3SEMCR register fields */
+#define PWR_WIO3SEMCR_SEM_MUTEX			BIT_32(0)
+#define PWR_WIO3SEMCR_SEMCID_MASK		GENMASK_32(6, 4)
+#define PWR_WIO3SEMCR_SEMCID_SHIFT		U(4)
+
+/* PWR_WIO4CIDCFGR register fields */
+#define PWR_WIO4CIDCFGR_CFEN			BIT_32(0)
+#define PWR_WIO4CIDCFGR_SEM_EN			BIT_32(1)
+#define PWR_WIO4CIDCFGR_SCID_MASK		GENMASK_32(6, 4)
+#define PWR_WIO4CIDCFGR_SCID_SHIFT		U(4)
+#define PWR_WIO4CIDCFGR_SEMWLC0			BIT_32(16)
+#define PWR_WIO4CIDCFGR_SEMWLC1			BIT_32(17)
+#define PWR_WIO4CIDCFGR_SEMWLC2			BIT_32(18)
+#define PWR_WIO4CIDCFGR_SEMWLC3			BIT_32(19)
+#define PWR_WIO4CIDCFGR_SEMWLC4			BIT_32(20)
+#define PWR_WIO4CIDCFGR_SEMWLC5			BIT_32(21)
+#define PWR_WIO4CIDCFGR_SEMWLC6			BIT_32(22)
+#define PWR_WIO4CIDCFGR_SEMWLC7			BIT_32(23)
+
+/* PWR_WIO4SEMCR register fields */
+#define PWR_WIO4SEMCR_SEM_MUTEX			BIT_32(0)
+#define PWR_WIO4SEMCR_SEMCID_MASK		GENMASK_32(6, 4)
+#define PWR_WIO4SEMCR_SEMCID_SHIFT		U(4)
+
+/* PWR_WIO5CIDCFGR register fields */
+#define PWR_WIO5CIDCFGR_CFEN			BIT_32(0)
+#define PWR_WIO5CIDCFGR_SEM_EN			BIT_32(1)
+#define PWR_WIO5CIDCFGR_SCID_MASK		GENMASK_32(6, 4)
+#define PWR_WIO5CIDCFGR_SCID_SHIFT		U(4)
+#define PWR_WIO5CIDCFGR_SEMWLC0			BIT_32(16)
+#define PWR_WIO5CIDCFGR_SEMWLC1			BIT_32(17)
+#define PWR_WIO5CIDCFGR_SEMWLC2			BIT_32(18)
+#define PWR_WIO5CIDCFGR_SEMWLC3			BIT_32(19)
+#define PWR_WIO5CIDCFGR_SEMWLC4			BIT_32(20)
+#define PWR_WIO5CIDCFGR_SEMWLC5			BIT_32(21)
+#define PWR_WIO5CIDCFGR_SEMWLC6			BIT_32(22)
+#define PWR_WIO5CIDCFGR_SEMWLC7			BIT_32(23)
+
+/* PWR_WIO5SEMCR register fields */
+#define PWR_WIO5SEMCR_SEM_MUTEX			BIT_32(0)
+#define PWR_WIO5SEMCR_SEMCID_MASK		GENMASK_32(6, 4)
+#define PWR_WIO5SEMCR_SEMCID_SHIFT		U(4)
+
+/* PWR_WIO6CIDCFGR register fields */
+#define PWR_WIO6CIDCFGR_CFEN			BIT_32(0)
+#define PWR_WIO6CIDCFGR_SEM_EN			BIT_32(1)
+#define PWR_WIO6CIDCFGR_SCID_MASK		GENMASK_32(6, 4)
+#define PWR_WIO6CIDCFGR_SCID_SHIFT		U(4)
+#define PWR_WIO6CIDCFGR_SEMWLC0			BIT_32(16)
+#define PWR_WIO6CIDCFGR_SEMWLC1			BIT_32(17)
+#define PWR_WIO6CIDCFGR_SEMWLC2			BIT_32(18)
+#define PWR_WIO6CIDCFGR_SEMWLC3			BIT_32(19)
+#define PWR_WIO6CIDCFGR_SEMWLC4			BIT_32(20)
+#define PWR_WIO6CIDCFGR_SEMWLC5			BIT_32(21)
+#define PWR_WIO6CIDCFGR_SEMWLC6			BIT_32(22)
+#define PWR_WIO6CIDCFGR_SEMWLC7			BIT_32(23)
+
+/* PWR_WIO6SEMCR register fields */
+#define PWR_WIO6SEMCR_SEM_MUTEX			BIT_32(0)
+#define PWR_WIO6SEMCR_SEMCID_MASK		GENMASK_32(6, 4)
+#define PWR_WIO6SEMCR_SEMCID_SHIFT		U(4)
+
+/* PWR_CPU1D1SR register fields */
+#define PWR_CPU1D1SR_HOLD_BOOT			BIT_32(0)
+#define PWR_CPU1D1SR_CSTATE_MASK		GENMASK_32(3, 2)
+#define PWR_CPU1D1SR_CSTATE_SHIFT		U(2)
+#define PWR_CPU1D1SR_DSTATE_MASK		GENMASK_32(10, 8)
+#define PWR_CPU1D1SR_DSTATE_SHIFT		U(8)
+
+/* PWR_CPU2D2SR register fields */
+#define PWR_CPU2D2SR_HOLD_BOOT			BIT_32(0)
+#define PWR_CPU2D2SR_WFBEN			BIT_32(1)
+#define PWR_CPU2D2SR_CSTATE_MASK		GENMASK_32(3, 2)
+#define PWR_CPU2D2SR_CSTATE_SHIFT		U(2)
+#define PWR_CPU2D2SR_DSTATE_MASK		GENMASK_32(10, 8)
+#define PWR_CPU2D2SR_DSTATE_SHIFT		U(8)
+
+/* PWR_CPU3D3SR register fields */
+#define PWR_CPU3D3SR_CSTATE_MASK		GENMASK_32(3, 2)
+#define PWR_CPU3D3SR_CSTATE_SHIFT		U(2)
+#define PWR_CPU3D3SR_DSTATE_MASK		GENMASK_32(10, 8)
+#define PWR_CPU3D3SR_DSTATE_SHIFT		U(8)
+
+/* PWR_DBGR register fields */
+#define PWR_DBGR_FD3S				BIT_32(0)
+#define PWR_DBGR_VDDIOKRETRAM			BIT_32(16)
+#define PWR_DBGR_VDDIOKBKPRAM			BIT_32(17)
+#define PWR_DBGR_VDDIOKD3			BIT_32(18)
+#define PWR_DBGR_VDDIOKLPSRAM1			BIT_32(19)
+
+/* PWR_VERR register fields */
+#define PWR_VERR_MINREV_MASK			GENMASK_32(3, 0)
+#define PWR_VERR_MINREV_SHIFT			U(0)
+#define PWR_VERR_MAJREV_MASK			GENMASK_32(7, 4)
+#define PWR_VERR_MAJREV_SHIFT			U(4)
+
+#endif /* STM32MP2_PWR_H */
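The *_MASK/*_SHIFT pairs above are meant to be used together. Below is a minimal sketch of extracting the C-state and D-state fields from a raw PWR_CPU1D1SR value; the helper names are illustrative and only the field layout comes from the defines in this header.

static inline uint32_t pwr_cpu1d1sr_cstate(uint32_t reg)
{
	/* CSTATE occupies bits [3:2] */
	return (reg & PWR_CPU1D1SR_CSTATE_MASK) >> PWR_CPU1D1SR_CSTATE_SHIFT;
}

static inline uint32_t pwr_cpu1d1sr_dstate(uint32_t reg)
{
	/* DSTATE occupies bits [10:8] */
	return (reg & PWR_CPU1D1SR_DSTATE_MASK) >> PWR_CPU1D1SR_DSTATE_SHIFT;
}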
diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S
index 096e0b1..cfa5831 100644
--- a/include/lib/cpus/aarch32/cpu_macros.S
+++ b/include/lib/cpus/aarch32/cpu_macros.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -115,11 +115,6 @@
 	  .popsection
 	.endif
 
-	/*
-	 * Mandatory errata status printing function for CPUs of
-	 * this class.
-	 */
-	.word \_name\()_errata_report
 	.word \_name\()_cpu_str
 
 #ifdef IMAGE_BL32
@@ -130,45 +125,6 @@
 #endif
 	.endm
 
-#if REPORT_ERRATA
-	/*
-	 * Print status of a CPU errata
-	 *
-	 * _chosen:
-	 *	Identifier indicating whether or not a CPU errata has been
-	 *	compiled in.
-	 * _cpu:
-	 *	Name of the CPU
-	 * _id:
-	 *	Errata identifier
-	 * _rev_var:
-	 *	Register containing the combined value CPU revision and variant
-	 *	- typically the return value of cpu_get_rev_var
-	 */
-	.macro report_errata _chosen, _cpu, _id, _rev_var=r4
-	/* Stash a string with errata ID */
-	.pushsection .rodata
-	\_cpu\()_errata_\_id\()_str:
-	.asciz	"\_id"
-	.popsection
-
-	/* Check whether errata applies */
-	mov	r0, \_rev_var
-	bl	check_errata_\_id
-
-	.ifeq \_chosen
-	/*
-	 * Errata workaround has not been compiled in. If the errata would have
-	 * applied had it been compiled in, print its status as missing.
-	 */
-	cmp	r0, #0
-	movne	r0, #ERRATA_MISSING
-	.endif
-	ldr	r1, =\_cpu\()_cpu_str
-	ldr	r2, =\_cpu\()_errata_\_id\()_str
-	bl	errata_print_msg
-	.endm
-#endif
 	/*
 	 * Helper macro that reads the part number of the current CPU and jumps
 	 * to the given label if it matches the CPU MIDR provided.
@@ -239,21 +195,4 @@
 	.popsection
 .endm
 
-/*
- * Maintain compatibility with the old scheme of "each cpu has its own reporter".
- * TODO remove entirely once all cpus have been converted. This includes the
- * cpu_ops entry, as print_errata_status can call this directly for all cpus
- */
-.macro errata_report_shim _cpu:req
-	#if REPORT_ERRATA
-	func \_cpu\()_errata_report
-		push	{r12, lr}
-
-		bl generic_errata_report
-
-		pop	{r12, lr}
-		bx	lr
-	endfunc \_cpu\()_errata_report
-	#endif
-.endm
 #endif /* CPU_MACROS_S */
diff --git a/include/lib/cpus/aarch64/cortex_a520.h b/include/lib/cpus/aarch64/cortex_a520.h
index ed3401d..11ddea9 100644
--- a/include/lib/cpus/aarch64/cortex_a520.h
+++ b/include/lib/cpus/aarch64/cortex_a520.h
@@ -28,4 +28,15 @@
 #define CORTEX_A520_CPUPWRCTLR_EL1				S3_0_C15_C2_7
 #define CORTEX_A520_CPUPWRCTLR_EL1_CORE_PWRDN_BIT		U(1)
 
+#ifndef __ASSEMBLER__
+#if ERRATA_A520_2938996
+long check_erratum_cortex_a520_2938996(long cpu_rev);
+#else
+static inline long check_erratum_cortex_a520_2938996(long cpu_rev)
+{
+	return 0;
+}
+#endif /* ERRATA_A520_2938996 */
+#endif /* __ASSEMBLER__ */
+
 #endif /* CORTEX_A520_H */
diff --git a/include/lib/cpus/aarch64/cortex_a720.h b/include/lib/cpus/aarch64/cortex_a720.h
index fb27f79..129c1ee 100644
--- a/include/lib/cpus/aarch64/cortex_a720.h
+++ b/include/lib/cpus/aarch64/cortex_a720.h
@@ -23,6 +23,11 @@
 #define CORTEX_A720_CPUACTLR2_EL1				S3_0_C15_C1_1
 
 /*******************************************************************************
+ * CPU Auxiliary Control register 4 specific definitions.
+ ******************************************************************************/
+#define CORTEX_A720_CPUACTLR4_EL1				S3_0_C15_C1_3
+
+/*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
 #define CORTEX_A720_CPUECTLR_EL1				S3_0_C15_C1_4
diff --git a/include/lib/cpus/aarch64/cortex_x4.h b/include/lib/cpus/aarch64/cortex_x4.h
index 433687b..4b6af8b 100644
--- a/include/lib/cpus/aarch64/cortex_x4.h
+++ b/include/lib/cpus/aarch64/cortex_x4.h
@@ -28,4 +28,21 @@
  ******************************************************************************/
 #define CORTEX_X4_CPUACTLR3_EL1				S3_0_C15_C1_2
 
+/*******************************************************************************
+ * CPU Auxiliary control register 5 specific definitions
+ ******************************************************************************/
+#define CORTEX_X4_CPUACTLR5_EL1				S3_0_C15_C8_0
+#define CORTEX_X4_CPUACTLR5_EL1_BIT_14			(ULL(1) << 14)
+
+#ifndef __ASSEMBLER__
+#if ERRATA_X4_2726228
+long check_erratum_cortex_x4_2726228(long cpu_rev);
+#else
+static inline long check_erratum_cortex_x4_2726228(long cpu_rev)
+{
+	return 0;
+}
+#endif /* ERRATA_X4_2726228 */
+#endif /* __ASSEMBLER__ */
+
 #endif /* CORTEX_X4_H */
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index d49d82e..98294b9 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -132,12 +132,6 @@
 	  .popsection
 	.endif
 
-
-	/*
-	 * Mandatory errata status printing function for CPUs of
-	 * this class.
-	 */
-	.quad \_name\()_errata_report
 	.quad \_name\()_cpu_str
 
 #ifdef IMAGE_BL31
@@ -171,49 +165,6 @@
 			\_extra1, \_extra2, \_extra3, 0, \_power_down_ops
 	.endm
 
-/* TODO can be deleted once all CPUs have been converted */
-#if REPORT_ERRATA
-	/*
-	 * Print status of a CPU errata
-	 *
-	 * _chosen:
-	 *	Identifier indicating whether or not a CPU errata has been
-	 *	compiled in.
-	 * _cpu:
-	 *	Name of the CPU
-	 * _id:
-	 *	Errata identifier
-	 * _rev_var:
-	 *	Register containing the combined value CPU revision and variant
-	 *	- typically the return value of cpu_get_rev_var
-	 */
-	.macro report_errata _chosen, _cpu, _id, _rev_var=x8
-	/* Stash a string with errata ID */
-	.pushsection .rodata
-	\_cpu\()_errata_\_id\()_str:
-	.asciz	"\_id"
-	.popsection
-
-	/* Check whether errata applies */
-	mov	x0, \_rev_var
-	/* Shall clobber: x0-x7 */
-	bl	check_errata_\_id
-
-	.ifeq \_chosen
-	/*
-	 * Errata workaround has not been compiled in. If the errata would have
-	 * applied had it been compiled in, print its status as missing.
-	 */
-	cbz	x0, 900f
-	mov	x0, #ERRATA_MISSING
-	.endif
-900:
-	adr	x1, \_cpu\()_cpu_str
-	adr	x2, \_cpu\()_errata_\_id\()_str
-	bl	errata_print_msg
-	.endm
-#endif
-
 	/*
 	 * This macro is used on some CPUs to detect if they are vulnerable
 	 * to CVE-2017-5715.
@@ -622,23 +573,4 @@
 	endfunc \_cpu\()_reset_func
 .endm
 
-/*
- * Maintain compatibility with the old scheme of each cpu has its own reporting.
- * TODO remove entirely once all cpus have been converted. This includes the
- * cpu_ops entry, as print_errata_status can call this directly for all cpus
- */
-.macro errata_report_shim _cpu:req
-	#if REPORT_ERRATA
-	func \_cpu\()_errata_report
-		/* normal stack frame for pretty debugging */
-		stp	x29, x30, [sp, #-16]!
-		mov	x29, sp
-
-		bl	generic_errata_report
-
-		ldp	x29, x30, [sp], #16
-		ret
-	endfunc \_cpu\()_errata_report
-	#endif
-.endm
 #endif /* CPU_MACROS_S */
diff --git a/include/lib/cpus/cpu_ops.h b/include/lib/cpus/cpu_ops.h
index 8b36ff1..0084189 100644
--- a/include/lib/cpus/cpu_ops.h
+++ b/include/lib/cpus/cpu_ops.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2023-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -57,7 +57,6 @@
 #define CPU_ERRATA_LIST_END_SIZE	CPU_WORD_SIZE
 /* Fields required to print errata status  */
 #if REPORT_ERRATA
-#define CPU_ERRATA_FUNC_SIZE	CPU_WORD_SIZE
 #define CPU_CPU_STR_SIZE	CPU_WORD_SIZE
 /* BL1 doesn't require mutual exclusion and printed flag. */
 #if defined(IMAGE_BL31) || defined(IMAGE_BL32)
@@ -68,7 +67,6 @@
 #define CPU_ERRATA_PRINTED_SIZE	0
 #endif /* defined(IMAGE_BL31) || defined(IMAGE_BL32) */
 #else
-#define CPU_ERRATA_FUNC_SIZE	0
 #define CPU_CPU_STR_SIZE	0
 #define CPU_ERRATA_LOCK_SIZE	0
 #define CPU_ERRATA_PRINTED_SIZE	0
@@ -98,8 +96,7 @@
 #endif /* __aarch64__ */
 #define CPU_ERRATA_LIST_START	CPU_PWR_DWN_OPS + CPU_PWR_DWN_OPS_SIZE
 #define CPU_ERRATA_LIST_END	CPU_ERRATA_LIST_START + CPU_ERRATA_LIST_START_SIZE
-#define CPU_ERRATA_FUNC		CPU_ERRATA_LIST_END + CPU_ERRATA_LIST_END_SIZE
-#define CPU_CPU_STR		CPU_ERRATA_FUNC + CPU_ERRATA_FUNC_SIZE
+#define CPU_CPU_STR		CPU_ERRATA_LIST_END + CPU_ERRATA_LIST_END_SIZE
 #define CPU_ERRATA_LOCK		CPU_CPU_STR + CPU_CPU_STR_SIZE
 #define CPU_ERRATA_PRINTED	CPU_ERRATA_LOCK + CPU_ERRATA_LOCK_SIZE
 #if __aarch64__
@@ -130,7 +127,6 @@
 	void *errata_list_start;
 	void *errata_list_end;
 #if REPORT_ERRATA
-	void (*errata_func)(void);
 	char *cpu_str;
 #if defined(IMAGE_BL31) || defined(IMAGE_BL32)
 	spinlock_t *errata_lock;
diff --git a/include/lib/cpus/errata.h b/include/lib/cpus/errata.h
index 2080898..ef1b02b 100644
--- a/include/lib/cpus/errata.h
+++ b/include/lib/cpus/errata.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -25,11 +25,19 @@
 #define ERRATUM_MITIGATED	ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE
 #define ERRATUM_ENTRY_SIZE	ERRATUM_MITIGATED + ERRATUM_MITIGATED_SIZE
 
+/* Errata status */
+#define ERRATA_NOT_APPLIES	0
+#define ERRATA_APPLIES		1
+#define ERRATA_MISSING		2
+
 #ifndef __ASSEMBLER__
 #include <lib/cassert.h>
 
 void print_errata_status(void);
-void errata_print_msg(unsigned int status, const char *cpu, const char *id);
+
+#if ERRATA_A520_2938996 || ERRATA_X4_2726228
+unsigned int check_if_affected_core(void);
+#endif
 
 /*
  * NOTE that this structure will be different on AArch32 and AArch64. The
@@ -74,11 +82,6 @@
 
 #endif /* __ASSEMBLER__ */
 
-/* Errata status */
-#define ERRATA_NOT_APPLIES	0
-#define ERRATA_APPLIES		1
-#define ERRATA_MISSING		2
-
 /* Macro to get CPU revision code for checking errata version compatibility. */
 #define CPU_REV(r, p)		((r << 4) | p)
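For reference, the revision-variant value compared against CPU_REV() packs the variant in bits [7:4] and the revision in bits [3:0]; this is the value the assembly helpers (cpu_rev_var_ls and the custom A520/X4 checks in this patch) compare against. A C-level sketch of the same "applies up to rXpY" check follows; the function name is illustrative.

static long erratum_applies_up_to(long rev_var, unsigned int max_rev,
				  unsigned int max_patch)
{
	/* e.g. an r0p1 part yields rev_var == CPU_REV(0, 1) == 0x01 */
	return (rev_var <= CPU_REV(max_rev, max_patch)) ?
		ERRATA_APPLIES : ERRATA_NOT_APPLIES;
}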
 
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index 7c10506..87f1541 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -7,9 +7,18 @@
 #ifndef CONTEXT_H
 #define CONTEXT_H
 
-#include <lib/el3_runtime/context_el1.h>
+#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
 #include <lib/el3_runtime/context_el2.h>
+#else
+/*
+ * EL1 context is required when:
+ * IMAGE_BL1 || ((!CTX_INCLUDE_EL2_REGS) && IMAGE_BL31)
+ */
+#include <lib/el3_runtime/context_el1.h>
+#endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
+
 #include <lib/el3_runtime/cpu_data.h>
+#include <lib/el3_runtime/simd_ctx.h>
 #include <lib/utils_def.h>
 
 /*******************************************************************************
@@ -82,60 +91,11 @@
  #define CTX_EL3STATE_END	U(0x50) /* Align to the next 16 byte boundary */
 #endif /* FFH_SUPPORT */
 
-/*******************************************************************************
- * Constants that allow assembler code to access members of and the 'fp_regs'
- * structure at their correct offsets.
- ******************************************************************************/
-# define CTX_FPREGS_OFFSET	(CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
-#if CTX_INCLUDE_FPREGS
-#define CTX_FP_Q0		U(0x0)
-#define CTX_FP_Q1		U(0x10)
-#define CTX_FP_Q2		U(0x20)
-#define CTX_FP_Q3		U(0x30)
-#define CTX_FP_Q4		U(0x40)
-#define CTX_FP_Q5		U(0x50)
-#define CTX_FP_Q6		U(0x60)
-#define CTX_FP_Q7		U(0x70)
-#define CTX_FP_Q8		U(0x80)
-#define CTX_FP_Q9		U(0x90)
-#define CTX_FP_Q10		U(0xa0)
-#define CTX_FP_Q11		U(0xb0)
-#define CTX_FP_Q12		U(0xc0)
-#define CTX_FP_Q13		U(0xd0)
-#define CTX_FP_Q14		U(0xe0)
-#define CTX_FP_Q15		U(0xf0)
-#define CTX_FP_Q16		U(0x100)
-#define CTX_FP_Q17		U(0x110)
-#define CTX_FP_Q18		U(0x120)
-#define CTX_FP_Q19		U(0x130)
-#define CTX_FP_Q20		U(0x140)
-#define CTX_FP_Q21		U(0x150)
-#define CTX_FP_Q22		U(0x160)
-#define CTX_FP_Q23		U(0x170)
-#define CTX_FP_Q24		U(0x180)
-#define CTX_FP_Q25		U(0x190)
-#define CTX_FP_Q26		U(0x1a0)
-#define CTX_FP_Q27		U(0x1b0)
-#define CTX_FP_Q28		U(0x1c0)
-#define CTX_FP_Q29		U(0x1d0)
-#define CTX_FP_Q30		U(0x1e0)
-#define CTX_FP_Q31		U(0x1f0)
-#define CTX_FP_FPSR		U(0x200)
-#define CTX_FP_FPCR		U(0x208)
-#if CTX_INCLUDE_AARCH32_REGS
-#define CTX_FP_FPEXC32_EL2	U(0x210)
-#define CTX_FPREGS_END		U(0x220) /* Align to the next 16 byte boundary */
-#else
-#define CTX_FPREGS_END		U(0x210) /* Align to the next 16 byte boundary */
-#endif /* CTX_INCLUDE_AARCH32_REGS */
-#else
-#define CTX_FPREGS_END		U(0)
-#endif /* CTX_INCLUDE_FPREGS */
 
 /*******************************************************************************
  * Registers related to CVE-2018-3639
  ******************************************************************************/
-#define CTX_CVE_2018_3639_OFFSET	(CTX_FPREGS_OFFSET + CTX_FPREGS_END)
+#define CTX_CVE_2018_3639_OFFSET	(CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
 #define CTX_CVE_2018_3639_DISABLE	U(0)
 #define CTX_CVE_2018_3639_END		U(0x10) /* Align to the next 16 byte boundary */
 
@@ -230,9 +190,6 @@
 /* Constants to determine the size of individual context structures */
 #define CTX_GPREG_ALL		(CTX_GPREGS_END >> DWORD_SHIFT)
 
-#if CTX_INCLUDE_FPREGS
-# define CTX_FPREG_ALL		(CTX_FPREGS_END >> DWORD_SHIFT)
-#endif
 #define CTX_EL3STATE_ALL	(CTX_EL3STATE_END >> DWORD_SHIFT)
 #define CTX_CVE_2018_3639_ALL	(CTX_CVE_2018_3639_END >> DWORD_SHIFT)
 
@@ -253,15 +210,6 @@
 DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
 
 /*
- * AArch64 floating point register context structure for preserving
- * the floating point state during switches from one security state to
- * another.
- */
-#if CTX_INCLUDE_FPREGS
-DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL);
-#endif
-
-/*
  * Miscellaneous registers used by EL3 firmware to maintain its state
  * across exception entries and exits
  */
@@ -300,9 +248,6 @@
 	gp_regs_t gpregs_ctx;
 	el3_state_t el3state_ctx;
 
-#if CTX_INCLUDE_FPREGS
-	fp_regs_t fpregs_ctx;
-#endif
 	cve_2018_3639_t cve_2018_3639_ctx;
 
 #if ERRATA_SPECULATIVE_AT
@@ -313,10 +258,16 @@
 	pauth_t pauth_ctx;
 #endif
 
-	el1_sysregs_t el1_sysregs_ctx;
-
-#if CTX_INCLUDE_EL2_REGS
+#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
 	el2_sysregs_t el2_sysregs_ctx;
+#else
+	/* EL1 context should be included only for IMAGE_BL1, or for
+	 * IMAGE_BL31 when CTX_INCLUDE_EL2_REGS=0.
+	 * When SPMD_SPM_AT_SEL2=1, the SPMC at S-EL2 takes care of saving
+	 * and restoring the EL1 registers. In this case, BL31 at EL3 can
+	 * skip save and restore of the EL1 context registers.
+	 */
+	el1_sysregs_t el1_sysregs_ctx;
 #endif
 
 } cpu_context_t;
@@ -335,13 +286,13 @@
 
 /* Macros to access members of the 'cpu_context_t' structure */
 #define get_el3state_ctx(h)	(&((cpu_context_t *) h)->el3state_ctx)
-#if CTX_INCLUDE_FPREGS
-# define get_fpregs_ctx(h)	(&((cpu_context_t *) h)->fpregs_ctx)
-#endif
+
+#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
+#define get_el2_sysregs_ctx(h)	(&((cpu_context_t *) h)->el2_sysregs_ctx)
+#else
 #define get_el1_sysregs_ctx(h)	(&((cpu_context_t *) h)->el1_sysregs_ctx)
-#if CTX_INCLUDE_EL2_REGS
-# define get_el2_sysregs_ctx(h)	(&((cpu_context_t *) h)->el2_sysregs_ctx)
 #endif
+
 #define get_gpregs_ctx(h)	(&((cpu_context_t *) h)->gpregs_ctx)
 #define get_cve_2018_3639_ctx(h)	(&((cpu_context_t *) h)->cve_2018_3639_ctx)
 
@@ -364,10 +315,6 @@
 CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx),
 	assert_core_context_el3state_offset_mismatch);
 
-#if CTX_INCLUDE_FPREGS
-CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx),
-	assert_core_context_fp_offset_mismatch);
-#endif /* CTX_INCLUDE_FPREGS */
 
 CASSERT(CTX_CVE_2018_3639_OFFSET == __builtin_offsetof(cpu_context_t, cve_2018_3639_ctx),
 	assert_core_context_cve_2018_3639_offset_mismatch);
@@ -422,10 +369,73 @@
  * Function prototypes
  ******************************************************************************/
 #if CTX_INCLUDE_FPREGS
-void fpregs_context_save(fp_regs_t *regs);
-void fpregs_context_restore(fp_regs_t *regs);
+void fpregs_context_save(simd_regs_t *regs);
+void fpregs_context_restore(simd_regs_t *regs);
 #endif
 
+/*******************************************************************************
+ * The next four inline functions are required for IMAGE_BL1, as well as for
+ * IMAGE_BL31 for the below combinations.
+ * ============================================================================
+ * | ERRATA_SPECULATIVE_AT| CTX_INCLUDE_EL2_REGS |   Combination              |
+ * ============================================================================
+ * |       0              |       0              |   Valid (EL1 ctx)          |
+ * |______________________|______________________|____________________________|
+ * |                      |                      | Invalid (No Errata/EL1 Ctx)|
+ * |       0              |       1              | Hence commented out.       |
+ * |______________________|______________________|____________________________|
+ * |                      |                      |                            |
+ * |       1              |       0              |   Valid (Errata ctx)       |
+ * |______________________|______________________|____________________________|
+ * |                      |                      |                            |
+ * |       1              |       1              |   Valid (Errata ctx)       |
+ * |______________________|______________________|____________________________|
+ * ============================================================================
+ ******************************************************************************/
+#if (IMAGE_BL1 || ((ERRATA_SPECULATIVE_AT) || (!CTX_INCLUDE_EL2_REGS)))
+
+static inline void write_ctx_sctlr_el1_reg_errata(cpu_context_t *ctx, u_register_t val)
+{
+#if (ERRATA_SPECULATIVE_AT)
+	write_ctx_reg(get_errata_speculative_at_ctx(ctx),
+		      CTX_ERRATA_SPEC_AT_SCTLR_EL1, val);
+#else
+	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1, val);
+#endif /* ERRATA_SPECULATIVE_AT */
+}
+
+static inline void write_ctx_tcr_el1_reg_errata(cpu_context_t *ctx, u_register_t val)
+{
+#if (ERRATA_SPECULATIVE_AT)
+	write_ctx_reg(get_errata_speculative_at_ctx(ctx),
+		      CTX_ERRATA_SPEC_AT_TCR_EL1, val);
+#else
+	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), tcr_el1, val);
+#endif /* ERRATA_SPECULATIVE_AT */
+}
+
+static inline u_register_t read_ctx_sctlr_el1_reg_errata(cpu_context_t *ctx)
+{
+#if (ERRATA_SPECULATIVE_AT)
+	return read_ctx_reg(get_errata_speculative_at_ctx(ctx),
+		      CTX_ERRATA_SPEC_AT_SCTLR_EL1);
+#else
+	return read_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1);
+#endif /* ERRATA_SPECULATIVE_AT */
+}
+
+static inline u_register_t read_ctx_tcr_el1_reg_errata(cpu_context_t *ctx)
+{
+#if (ERRATA_SPECULATIVE_AT)
+	return read_ctx_reg(get_errata_speculative_at_ctx(ctx),
+		      CTX_ERRATA_SPEC_AT_TCR_EL1);
+#else
+	return read_el1_ctx_common(get_el1_sysregs_ctx(ctx), tcr_el1);
+#endif /* ERRATA_SPECULATIVE_AT */
+}
+
+#endif /* (IMAGE_BL1 || ((ERRATA_SPECULATIVE_AT) || (!CTX_INCLUDE_EL2_REGS))) */
+
 #endif /* __ASSEMBLER__ */
 
 #endif /* CONTEXT_H */
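A minimal usage sketch of the new read/write_ctx_sctlr_el1_reg_errata accessors: the same caller code works whether SCTLR_EL1 lives in the errata-speculative-AT area or in the EL1 context. The SCTLR_C_BIT name and the helper below are assumptions for illustration.

static void ctx_enable_dcache(cpu_context_t *ctx)
{
	u_register_t sctlr = read_ctx_sctlr_el1_reg_errata(ctx);

	sctlr |= SCTLR_C_BIT;	/* assumed bit name, illustration only */
	write_ctx_sctlr_el1_reg_errata(ctx, sctlr);
}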
diff --git a/include/lib/el3_runtime/context_el2.h b/include/lib/el3_runtime/context_el2.h
index ca1ea4e..14c1fb6 100644
--- a/include/lib/el3_runtime/context_el2.h
+++ b/include/lib/el3_runtime/context_el2.h
@@ -13,7 +13,6 @@
  * AArch64 EL2 system register context structure for preserving the
  * architectural state during world switches.
  ******************************************************************************/
-#if CTX_INCLUDE_EL2_REGS
 typedef struct el2_common_regs {
 	uint64_t actlr_el2;
 	uint64_t afsr0_el2;
@@ -359,7 +358,6 @@
 #define write_el2_ctx_mpam(ctx, reg, val)
 #endif /* CTX_INCLUDE_MPAM_REGS */
 
-#endif /* CTX_INCLUDE_EL2_REGS */
 /******************************************************************************/
 
 #endif /* __ASSEMBLER__ */
diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h
index 7451b85..70dbd46 100644
--- a/include/lib/el3_runtime/context_mgmt.h
+++ b/include/lib/el3_runtime/context_mgmt.h
@@ -44,15 +44,17 @@
 void cm_manage_extensions_el3(void);
 void manage_extensions_nonsecure_per_world(void);
 void cm_el3_arch_init_per_world(per_world_context_t *per_world_ctx);
+void cm_handle_asymmetric_features(void);
 #endif
 
-#if CTX_INCLUDE_EL2_REGS
+#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
 void cm_el2_sysregs_context_save(uint32_t security_state);
 void cm_el2_sysregs_context_restore(uint32_t security_state);
-#endif
-
+#else
 void cm_el1_sysregs_context_save(uint32_t security_state);
 void cm_el1_sysregs_context_restore(uint32_t security_state);
+#endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
+
 void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
 void cm_set_elr_spsr_el3(uint32_t security_state,
 			uintptr_t entrypoint, uint32_t spsr);
@@ -95,6 +97,7 @@
 void cm_set_next_context(void *context);
 static inline void cm_manage_extensions_el3(void) {}
 static inline void manage_extensions_nonsecure_per_world(void) {}
+static inline void cm_handle_asymmetric_features(void) {}
 #endif /* __aarch64__ */
 
 #endif /* CONTEXT_MGMT_H */
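Since the EL2 and EL1 save/restore prototypes are now mutually exclusive, a caller on the world-switch path would typically mirror the same build-time guard. A hedged sketch, with security_state supplied by the caller and the wrapper name chosen for illustration:

static void save_outgoing_world_sysregs(uint32_t security_state)
{
#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
	cm_el2_sysregs_context_save(security_state);
#else
	cm_el1_sysregs_context_save(security_state);
#endif
}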
diff --git a/include/lib/el3_runtime/simd_ctx.h b/include/lib/el3_runtime/simd_ctx.h
new file mode 100644
index 0000000..fdbe24f
--- /dev/null
+++ b/include/lib/el3_runtime/simd_ctx.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022, Google LLC. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SIMD_CTX_H
+#define SIMD_CTX_H
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'simd_context'
+ * structure at their correct offsets.
+ ******************************************************************************/
+
+#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
+#if CTX_INCLUDE_SVE_REGS
+#define SIMD_VECTOR_LEN_BYTES	(SVE_VECTOR_LEN / 8) /* Length of vector in bytes */
+#elif CTX_INCLUDE_FPREGS
+#define SIMD_VECTOR_LEN_BYTES	U(16) /* 128 bits fixed vector length for FPU */
+#endif /* CTX_INCLUDE_SVE_REGS */
+
+#define CTX_SIMD_VECTORS	U(0)
+/* there are 32 vector registers, each of size SIMD_VECTOR_LEN_BYTES */
+#define CTX_SIMD_FPSR		(CTX_SIMD_VECTORS + (32 * SIMD_VECTOR_LEN_BYTES))
+#define CTX_SIMD_FPCR		(CTX_SIMD_FPSR + 8)
+
+#if CTX_INCLUDE_FPREGS && CTX_INCLUDE_AARCH32_REGS
+#define CTX_SIMD_FPEXC32	(CTX_SIMD_FPCR + 8)
+#define CTX_SIMD_PREDICATES	(CTX_SIMD_FPEXC32 + 16)
+#else
+#define CTX_SIMD_PREDICATES      (CTX_SIMD_FPCR + 8)
+#endif /* CTX_INCLUDE_FPREGS && CTX_INCLUDE_AARCH32_REGS */
+
+/*
+ * Each predicate register is 1/8th the size of a vector register and there are 16
+ * predicate registers
+ */
+#define CTX_SIMD_FFR		(CTX_SIMD_PREDICATES + (16 * (SIMD_VECTOR_LEN_BYTES / 8)))
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+#include <lib/cassert.h>
+
+/*
+ * Please do not change the order of fields in this struct, as that may violate
+ * alignment requirements and affect how assembly code accesses members of this
+ * struct.
+ */
+typedef struct {
+	uint8_t vectors[32][SIMD_VECTOR_LEN_BYTES];
+	uint8_t fpsr[8];
+	uint8_t fpcr[8];
+#if CTX_INCLUDE_FPREGS && CTX_INCLUDE_AARCH32_REGS
+	/* 16 bytes to align to next 16 byte boundary when CTX_INCLUDE_SVE_REGS is 0 */
+	uint8_t fpexc32_el2[16];
+#endif
+#if CTX_INCLUDE_SVE_REGS
+	/* FFR and each predicate register is one-eighth of the SVE vector length */
+	uint8_t predicates[16][SIMD_VECTOR_LEN_BYTES / 8];
+	uint8_t ffr[SIMD_VECTOR_LEN_BYTES / 8];
+	/* SMCCCv1.3 FID[16] hint bit state recorded on EL3 entry */
+	bool hint;
+#endif /* CTX_INCLUDE_SVE_REGS */
+} __aligned(16) simd_regs_t;
+
+CASSERT(CTX_SIMD_VECTORS == __builtin_offsetof(simd_regs_t, vectors),
+		assert_vectors_mismatch);
+
+CASSERT(CTX_SIMD_FPSR == __builtin_offsetof(simd_regs_t, fpsr),
+		assert_fpsr_mismatch);
+
+CASSERT(CTX_SIMD_FPCR == __builtin_offsetof(simd_regs_t, fpcr),
+		assert_fpcr_mismatch);
+
+#if CTX_INCLUDE_FPREGS && CTX_INCLUDE_AARCH32_REGS
+CASSERT(CTX_SIMD_FPEXC32 == __builtin_offsetof(simd_regs_t, fpexc32_el2),
+		assert_fpexc32_mismatch);
+#endif
+
+#if CTX_INCLUDE_SVE_REGS
+CASSERT(CTX_SIMD_PREDICATES == __builtin_offsetof(simd_regs_t, predicates),
+		assert_predicates_mismatch);
+
+CASSERT(CTX_SIMD_FFR == __builtin_offsetof(simd_regs_t, ffr),
+		assert_ffr_mismatch);
+#endif
+
+void simd_ctx_save(uint32_t security_state, bool hint_sve);
+void simd_ctx_restore(uint32_t security_state);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */
+
+#endif /* SIMD_CTX_H */
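The offsets above are plain arithmetic on SIMD_VECTOR_LEN_BYTES. As a worked example, assuming an SVE build with SVE_VECTOR_LEN = 512 and both CTX_INCLUDE_FPREGS and CTX_INCLUDE_AARCH32_REGS disabled:

/*
 * SIMD_VECTOR_LEN_BYTES = 512 / 8              = 64
 * CTX_SIMD_VECTORS      = 0
 * CTX_SIMD_FPSR         = 32 * 64              = 2048
 * CTX_SIMD_FPCR         = 2048 + 8             = 2056
 * CTX_SIMD_PREDICATES   = 2056 + 8             = 2064
 * CTX_SIMD_FFR          = 2064 + 16 * (64 / 8) = 2192
 */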
diff --git a/include/lib/extensions/sve.h b/include/lib/extensions/sve.h
index 947c905..2979efb 100644
--- a/include/lib/extensions/sve.h
+++ b/include/lib/extensions/sve.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <context.h>
 
 #if (ENABLE_SME_FOR_NS || ENABLE_SVE_FOR_NS)
+
 void sve_init_el2_unused(void);
 void sve_enable_per_world(per_world_context_t *per_world_ctx);
 void sve_disable_per_world(per_world_context_t *per_world_ctx);
@@ -25,4 +26,9 @@
 }
 #endif /* ( ENABLE_SME_FOR_NS | ENABLE_SVE_FOR_NS ) */
 
+#if CTX_INCLUDE_SVE_REGS
+void sve_context_save(simd_regs_t *regs);
+void sve_context_restore(simd_regs_t *regs);
+#endif
+
 #endif /* SVE_H */
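A hedged sketch of the intended call pattern around a world switch using the new simd_ctx_save/simd_ctx_restore prototypes; the SECURE/NON_SECURE values are assumed to be the usual context-management security-state constants, and the wrapper name is illustrative.

static void switch_simd_to_secure(bool caller_set_sve_hint)
{
	/* hint reflects the SMCCCv1.3 SVE hint bit recorded on EL3 entry */
	simd_ctx_save(NON_SECURE, caller_set_sve_hint);
	simd_ctx_restore(SECURE);
}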
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
index 9f45e38..f4dc0d1 100644
--- a/lib/cpus/aarch32/aem_generic.S
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -40,14 +40,6 @@
 	b	dcsw_op_all
 endfunc aem_generic_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for AEM. Must follow AAPCS.
- */
-func aem_generic_errata_report
-	bx	lr
-endfunc aem_generic_errata_report
-#endif
 
 /* cpu_ops for Base AEM FVP */
 declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
diff --git a/lib/cpus/aarch32/cortex_a12.S b/lib/cpus/aarch32/cortex_a12.S
index 8eec27c..b95020e 100644
--- a/lib/cpus/aarch32/cortex_a12.S
+++ b/lib/cpus/aarch32/cortex_a12.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -73,8 +73,6 @@
 	b	cortex_a12_disable_smp
 endfunc cortex_a12_cluster_pwr_dwn
 
-errata_report_shim cortex_a12
-
 declare_cpu_ops cortex_a12, CORTEX_A12_MIDR, \
 	cortex_a12_reset_func, \
 	cortex_a12_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a15.S b/lib/cpus/aarch32/cortex_a15.S
index b41676d..53489ad 100644
--- a/lib/cpus/aarch32/cortex_a15.S
+++ b/lib/cpus/aarch32/cortex_a15.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -172,8 +172,6 @@
 	b	cortex_a15_disable_smp
 endfunc cortex_a15_cluster_pwr_dwn
 
-errata_report_shim cortex_a15
-
 declare_cpu_ops cortex_a15, CORTEX_A15_MIDR, \
 	cortex_a15_reset_func, \
 	cortex_a15_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a17.S b/lib/cpus/aarch32/cortex_a17.S
index 1877570..05e9616 100644
--- a/lib/cpus/aarch32/cortex_a17.S
+++ b/lib/cpus/aarch32/cortex_a17.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -106,8 +106,6 @@
 
 add_erratum_entry cortex_a17, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
 
-errata_report_shim cortex_a17
-
 func cortex_a17_reset_func
 	mov	r5, lr
 	bl	cpu_get_rev_var
diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S
index d08b4ff..c92a8c1 100644
--- a/lib/cpus/aarch32/cortex_a32.S
+++ b/lib/cpus/aarch32/cortex_a32.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -117,8 +117,6 @@
 	b	cortex_a32_disable_smp
 endfunc cortex_a32_cluster_pwr_dwn
 
-errata_report_shim cortex_a32
-
 declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
 	cortex_a32_reset_func, \
 	cortex_a32_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a5.S b/lib/cpus/aarch32/cortex_a5.S
index 625ea7b..146eb9c 100644
--- a/lib/cpus/aarch32/cortex_a5.S
+++ b/lib/cpus/aarch32/cortex_a5.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -69,8 +69,6 @@
 	b	cortex_a5_disable_smp
 endfunc cortex_a5_cluster_pwr_dwn
 
-errata_report_shim cortex_a5
-
 declare_cpu_ops cortex_a5, CORTEX_A5_MIDR, \
 	cortex_a5_reset_func, \
 	cortex_a5_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a53.S b/lib/cpus/aarch32/cortex_a53.S
index 89b238a..60be2b3 100644
--- a/lib/cpus/aarch32/cortex_a53.S
+++ b/lib/cpus/aarch32/cortex_a53.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -297,8 +297,6 @@
 	b	cortex_a53_disable_smp
 endfunc cortex_a53_cluster_pwr_dwn
 
-errata_report_shim cortex_a53
-
 declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
 	cortex_a53_reset_func, \
 	cortex_a53_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
index 1e5377b..d563482 100644
--- a/lib/cpus/aarch32/cortex_a57.S
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -606,8 +606,6 @@
 	b	cortex_a57_disable_ext_debug
 endfunc cortex_a57_cluster_pwr_dwn
 
-errata_report_shim cortex_a57
-
 declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
 	cortex_a57_reset_func, \
 	cortex_a57_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a7.S b/lib/cpus/aarch32/cortex_a7.S
index 4842ca6..f99ae79 100644
--- a/lib/cpus/aarch32/cortex_a7.S
+++ b/lib/cpus/aarch32/cortex_a7.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -73,8 +73,6 @@
 	b	cortex_a7_disable_smp
 endfunc cortex_a7_cluster_pwr_dwn
 
-errata_report_shim cortex_a7
-
 declare_cpu_ops cortex_a7, CORTEX_A7_MIDR, \
 	cortex_a7_reset_func, \
 	cortex_a7_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
index 77cf84d..8d399fd 100644
--- a/lib/cpus/aarch32/cortex_a72.S
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -256,8 +256,6 @@
 	b	cortex_a72_disable_ext_debug
 endfunc cortex_a72_cluster_pwr_dwn
 
-errata_report_shim cortex_a72
-
 declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
 	cortex_a72_reset_func, \
 	cortex_a72_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a9.S b/lib/cpus/aarch32/cortex_a9.S
index 1e9757a..dc5ff27 100644
--- a/lib/cpus/aarch32/cortex_a9.S
+++ b/lib/cpus/aarch32/cortex_a9.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -57,8 +57,6 @@
 
 add_erratum_entry cortex_a9, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
 
-errata_report_shim cortex_a9
-
 func cortex_a9_reset_func
 #if IMAGE_BL32 && WORKAROUND_CVE_2017_5715
 	ldr	r0, =wa_cve_2017_5715_bpiall_vbar
diff --git a/lib/cpus/aarch64/a64fx.S b/lib/cpus/aarch64/a64fx.S
index 54c20c3..4893a44 100644
--- a/lib/cpus/aarch64/a64fx.S
+++ b/lib/cpus/aarch64/a64fx.S
@@ -16,15 +16,6 @@
 func a64fx_cluster_pwr_dwn
 endfunc a64fx_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for A64FX. Must follow AAPCS.
- */
-func a64fx_errata_report
-        ret
-endfunc a64fx_errata_report
-#endif
-
         /* ---------------------------------------------
          * This function provides cpu specific
          * register information for crash reporting.
diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S
index d47279a..d5634cf 100644
--- a/lib/cpus/aarch64/aem_generic.S
+++ b/lib/cpus/aarch64/aem_generic.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -74,15 +74,6 @@
 	b	dcsw_op_all
 endfunc aem_generic_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for AEM. Must follow AAPCS.
- */
-func aem_generic_errata_report
-	ret
-endfunc aem_generic_errata_report
-#endif
-
 	/* ---------------------------------------------
 	 * This function provides cpu specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a35.S b/lib/cpus/aarch64/cortex_a35.S
index 6ffb944..c3d8c8d 100644
--- a/lib/cpus/aarch64/cortex_a35.S
+++ b/lib/cpus/aarch64/cortex_a35.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -111,8 +111,6 @@
 	b	cortex_a35_disable_smp
 endfunc cortex_a35_cluster_pwr_dwn
 
-errata_report_shim cortex_a35
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a35 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a510.S b/lib/cpus/aarch64/cortex_a510.S
index a59b92c..b49d45a 100644
--- a/lib/cpus/aarch64/cortex_a510.S
+++ b/lib/cpus/aarch64/cortex_a510.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -204,8 +204,6 @@
 	ret
 endfunc cortex_a510_core_pwr_dwn
 
-errata_report_shim cortex_a510
-
 cpu_reset_func_start cortex_a510
 	/* Disable speculative loads */
 	msr	SSBS, xzr
diff --git a/lib/cpus/aarch64/cortex_a520.S b/lib/cpus/aarch64/cortex_a520.S
index 74ecbf7..811c836 100644
--- a/lib/cpus/aarch64/cortex_a520.S
+++ b/lib/cpus/aarch64/cortex_a520.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -11,6 +11,9 @@
 #include <cpu_macros.S>
 #include <plat_macros.S>
 
+/* .global erratum_cortex_a520_2938996_wa */
+.global check_erratum_cortex_a520_2938996
+
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
 #error "Cortex A520 must be compiled with HW_ASSISTED_COHERENCY enabled"
@@ -32,6 +35,25 @@
 workaround_reset_end cortex_a520, ERRATUM(2858100)
 
 check_erratum_ls cortex_a520, ERRATUM(2858100), CPU_REV(0, 1)
+
+workaround_runtime_start cortex_a520, ERRATUM(2938996), ERRATA_A520_2938996, CORTEX_A520_MIDR
+workaround_runtime_end cortex_a520, ERRATUM(2938996)
+
+check_erratum_custom_start cortex_a520, ERRATUM(2938996)
+
+	/* This erratum needs to be enabled for r0p0 and r0p1.
+	 * Check if revision is less than or equal to r0p1.
+	 */
+
+#if ERRATA_A520_2938996
+	mov	x1, #1
+	b	cpu_rev_var_ls
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+check_erratum_custom_end cortex_a520, ERRATUM(2938996)
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -46,8 +68,6 @@
 	ret
 endfunc cortex_a520_core_pwr_dwn
 
-errata_report_shim cortex_a520
-
 cpu_reset_func_start cortex_a520
 	/* Disable speculative loads */
 	msr	SSBS, xzr
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index e6fb08a..4a5b318 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -199,8 +199,6 @@
 	b	cortex_a53_disable_smp
 endfunc cortex_a53_cluster_pwr_dwn
 
-errata_report_shim cortex_a53
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a53 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S
index 712b6e0..d5a74e9 100644
--- a/lib/cpus/aarch64/cortex_a55.S
+++ b/lib/cpus/aarch64/cortex_a55.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -116,8 +116,6 @@
 cpu_reset_func_start cortex_a55
 cpu_reset_func_end cortex_a55
 
-errata_report_shim cortex_a55
-
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index 8fafaca..374cc5d 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
  * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -284,8 +284,6 @@
 	b	cortex_a57_disable_ext_debug
 endfunc cortex_a57_cluster_pwr_dwn
 
-errata_report_shim cortex_a57
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a57 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a65.S b/lib/cpus/aarch64/cortex_a65.S
index 666324c..3023ecb 100644
--- a/lib/cpus/aarch64/cortex_a65.S
+++ b/lib/cpus/aarch64/cortex_a65.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -45,26 +45,6 @@
 	ret
 endfunc cortex_a65_cpu_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Cortex-A65. Must follow AAPCS.
- */
-func cortex_a65_errata_report
-	stp	x8, x30, [sp, #-16]!
-
-	bl	cpu_get_rev_var
-	mov	x8, x0
-
-	/*
-	 * Report all errata. The revision-variant information is passed to
-	 * checking functions of each errata.
-	 */
-	report_errata ERRATA_DSU_936184, cortex_a65, dsu_936184
-
-	ldp	x8, x30, [sp], #16
-	ret
-endfunc cortex_a65_errata_report
-#endif
 
 .section .rodata.cortex_a65_regs, "aS"
 cortex_a65_regs:  /* The ascii list of register names to be reported */
diff --git a/lib/cpus/aarch64/cortex_a65ae.S b/lib/cpus/aarch64/cortex_a65ae.S
index 85d1894..1cbb06a 100644
--- a/lib/cpus/aarch64/cortex_a65ae.S
+++ b/lib/cpus/aarch64/cortex_a65ae.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -41,8 +41,6 @@
 	ret
 endfunc cortex_a65ae_cpu_pwr_dwn
 
-errata_report_shim cortex_a65ae
-
 .section .rodata.cortex_a65ae_regs, "aS"
 cortex_a65ae_regs:  /* The ascii list of register names to be reported */
 	.asciz	"cpuectlr_el1", ""
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
index b99fbb3..4c33dda 100644
--- a/lib/cpus/aarch64/cortex_a710.S
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -229,8 +229,6 @@
 	ret
 endfunc cortex_a710_core_pwr_dwn
 
-errata_report_shim cortex_a710
-
 cpu_reset_func_start cortex_a710
 	/* Disable speculative loads */
 	msr	SSBS, xzr
diff --git a/lib/cpus/aarch64/cortex_a715.S b/lib/cpus/aarch64/cortex_a715.S
index 16be161..8c9988d 100644
--- a/lib/cpus/aarch64/cortex_a715.S
+++ b/lib/cpus/aarch64/cortex_a715.S
@@ -148,8 +148,6 @@
 	ret
 endfunc cortex_a715_core_pwr_dwn
 
-errata_report_shim cortex_a715
-
 	/* ---------------------------------------------
 	 * This function provides Cortex-A715 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 997f261..c300ea7 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -271,8 +271,6 @@
 	b	cortex_a72_disable_ext_debug
 endfunc cortex_a72_cluster_pwr_dwn
 
-errata_report_shim cortex_a72
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a72 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a720.S b/lib/cpus/aarch64/cortex_a720.S
index 53a1b78..9befb36 100644
--- a/lib/cpus/aarch64/cortex_a720.S
+++ b/lib/cpus/aarch64/cortex_a720.S
@@ -26,6 +26,18 @@
         wa_cve_2022_23960_bhb_vector_table CORTEX_A720_BHB_LOOP_COUNT, cortex_a720
 #endif /* WORKAROUND_CVE_2022_23960 */
 
+workaround_reset_start cortex_a720, ERRATUM(2792132), ERRATA_A720_2792132
+        sysreg_bit_set CORTEX_A720_CPUACTLR2_EL1, BIT(26)
+workaround_reset_end cortex_a720, ERRATUM(2792132)
+
+check_erratum_ls cortex_a720, ERRATUM(2792132), CPU_REV(0, 1)
+
+workaround_reset_start cortex_a720, ERRATUM(2844092), ERRATA_A720_2844092
+        sysreg_bit_set CORTEX_A720_CPUACTLR4_EL1, BIT(11)
+workaround_reset_end cortex_a720, ERRATUM(2844092)
+
+check_erratum_ls cortex_a720, ERRATUM(2844092), CPU_REV(0, 1)
+
 workaround_reset_start cortex_a720, ERRATUM(2926083), ERRATA_A720_2926083
 /* Erratum 2926083 workaround is required only if SPE is enabled */
 #if ENABLE_SPE_FOR_NS != 0
@@ -80,8 +92,6 @@
 	ret
 endfunc cortex_a720_core_pwr_dwn
 
-errata_report_shim cortex_a720
-
 	/* ---------------------------------------------
 	 * This function provides Cortex A720-specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a725.S b/lib/cpus/aarch64/cortex_a725.S
index c08945f..af98d14 100644
--- a/lib/cpus/aarch64/cortex_a725.S
+++ b/lib/cpus/aarch64/cortex_a725.S
@@ -40,8 +40,6 @@
 	ret
 endfunc cortex_a725_core_pwr_dwn
 
-errata_report_shim cortex_a725
-
 	/* ---------------------------------------------
 	 * This function provides Cortex-A725 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index 3a6b922..2130ceb 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -178,9 +178,6 @@
 	b	cortex_a73_disable_smp
 endfunc cortex_a73_cluster_pwr_dwn
 
-
-errata_report_shim cortex_a73
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a73 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index c90be67..9115303 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -146,8 +146,6 @@
 	ret
 endfunc cortex_a75_core_pwr_dwn
 
-errata_report_shim cortex_a75
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a75 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index 8b3d730..97e036e 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -511,8 +511,6 @@
 	ret
 endfunc cortex_a76_core_pwr_dwn
 
-errata_report_shim cortex_a76
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a76 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a76ae.S b/lib/cpus/aarch64/cortex_a76ae.S
index 08a6ef9..2fe3dbc 100644
--- a/lib/cpus/aarch64/cortex_a76ae.S
+++ b/lib/cpus/aarch64/cortex_a76ae.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -41,8 +41,6 @@
 cpu_reset_func_start cortex_a76ae
 cpu_reset_func_end cortex_a76ae
 
-errata_report_shim cortex_a76ae
-
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
index 86c2561..d1fc41a 100644
--- a/lib/cpus/aarch64/cortex_a77.S
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -167,7 +167,6 @@
 	ret
 endfunc cortex_a77_core_pwr_dwn
 
-errata_report_shim cortex_a77
 	/* ---------------------------------------------
 	 * This function provides Cortex-A77 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
index b5c24e1..5a63e78 100644
--- a/lib/cpus/aarch64/cortex_a78.S
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -198,8 +198,6 @@
 	ret
 endfunc cortex_a78_core_pwr_dwn
 
-errata_report_shim cortex_a78
-
 	/* ---------------------------------------------
 	 * This function provides cortex_a78 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a78_ae.S b/lib/cpus/aarch64/cortex_a78_ae.S
index d3a3e5d..bc10186 100644
--- a/lib/cpus/aarch64/cortex_a78_ae.S
+++ b/lib/cpus/aarch64/cortex_a78_ae.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
  * Copyright (c) 2021-2023, NVIDIA Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -128,8 +128,6 @@
 	ret
 endfunc cortex_a78_ae_core_pwr_dwn
 
-errata_report_shim cortex_a78_ae
-
 	/* -------------------------------------------------------
 	 * This function provides cortex_a78_ae specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_a78c.S b/lib/cpus/aarch64/cortex_a78c.S
index 0dc34f7..97d5743 100644
--- a/lib/cpus/aarch64/cortex_a78c.S
+++ b/lib/cpus/aarch64/cortex_a78c.S
@@ -121,8 +121,6 @@
 cpu_reset_func_start cortex_a78c
 cpu_reset_func_end cortex_a78c
 
-errata_report_shim cortex_a78c
-
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_gelas.S b/lib/cpus/aarch64/cortex_gelas.S
index 8870019..891e9a6 100644
--- a/lib/cpus/aarch64/cortex_gelas.S
+++ b/lib/cpus/aarch64/cortex_gelas.S
@@ -58,8 +58,6 @@
 	ret
 endfunc cortex_gelas_core_pwr_dwn
 
-errata_report_shim cortex_gelas
-
 	/* ---------------------------------------------
 	 * This function provides Gelas specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_x1.S b/lib/cpus/aarch64/cortex_x1.S
index 42634f1..ca6cac9 100644
--- a/lib/cpus/aarch64/cortex_x1.S
+++ b/lib/cpus/aarch64/cortex_x1.S
@@ -66,8 +66,6 @@
 	ret
 endfunc cortex_x1_core_pwr_dwn
 
-errata_report_shim cortex_x1
-
        /* ---------------------------------------------
 	* This function provides Cortex X1 specific
 	* register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
index d018182..ab0b19d 100644
--- a/lib/cpus/aarch64/cortex_x2.S
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -182,8 +182,6 @@
 	ret
 endfunc cortex_x2_core_pwr_dwn
 
-errata_report_shim cortex_x2
-
 cpu_reset_func_start cortex_x2
 	/* Disable speculative loads */
 	msr	SSBS, xzr
diff --git a/lib/cpus/aarch64/cortex_x3.S b/lib/cpus/aarch64/cortex_x3.S
index 49e9ad1..248f107 100644
--- a/lib/cpus/aarch64/cortex_x3.S
+++ b/lib/cpus/aarch64/cortex_x3.S
@@ -125,8 +125,6 @@
 	ret
 endfunc cortex_x3_core_pwr_dwn
 
-errata_report_shim cortex_x3
-
 	/* ---------------------------------------------
 	 * This function provides Cortex-X3-
 	 * specific register information for crash
diff --git a/lib/cpus/aarch64/cortex_x4.S b/lib/cpus/aarch64/cortex_x4.S
index 20f1ae1..1220d38 100644
--- a/lib/cpus/aarch64/cortex_x4.S
+++ b/lib/cpus/aarch64/cortex_x4.S
@@ -22,10 +22,30 @@
 #error "Cortex X4 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+.global check_erratum_cortex_x4_2726228
+
 #if WORKAROUND_CVE_2022_23960
         wa_cve_2022_23960_bhb_vector_table CORTEX_X4_BHB_LOOP_COUNT, cortex_x4
 #endif /* WORKAROUND_CVE_2022_23960 */
 
+workaround_runtime_start cortex_x4, ERRATUM(2726228), ERRATA_X4_2726228, CORTEX_X4_MIDR
+workaround_runtime_end cortex_x4, ERRATUM(2726228)
+
+check_erratum_custom_start cortex_x4, ERRATUM(2726228)
+
+	/* This erratum needs to be enabled for r0p0 and r0p1.
+	 * Check if revision is less than or equal to r0p1.
+	 */
+
+#if ERRATA_X4_2726228
+	mov	x1, #1
+	b	cpu_rev_var_ls
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+check_erratum_custom_end cortex_x4, ERRATUM(2726228)
+
 workaround_runtime_start cortex_x4, ERRATUM(2740089), ERRATA_X4_2740089
 	/* dsb before isb of power down sequence */
 	dsb	sy
@@ -39,6 +59,16 @@
 
 check_erratum_ls cortex_x4, ERRATUM(2763018), CPU_REV(0, 1)
 
+workaround_reset_start cortex_x4, ERRATUM(2816013), ERRATA_X4_2816013
+	mrs x1, id_aa64pfr1_el1
+	ubfx x2, x1, ID_AA64PFR1_EL1_MTE_SHIFT, #4
+	cbz x2, #1f
+	sysreg_bit_set CORTEX_X4_CPUACTLR5_EL1, BIT(14)
+1:
+workaround_reset_end cortex_x4, ERRATUM(2816013)
+
+check_erratum_ls cortex_x4, ERRATUM(2816013), CPU_REV(0, 1)
+
 workaround_reset_start cortex_x4, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
 #if IMAGE_BL31
 	/*
@@ -73,8 +103,6 @@
 	ret
 endfunc cortex_x4_core_pwr_dwn
 
-errata_report_shim cortex_x4
-
 	/* ---------------------------------------------
 	 * This function provides Cortex X4-specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/cortex_x925.S b/lib/cpus/aarch64/cortex_x925.S
index 36b442e..8109ffb 100644
--- a/lib/cpus/aarch64/cortex_x925.S
+++ b/lib/cpus/aarch64/cortex_x925.S
@@ -40,8 +40,6 @@
 	ret
 endfunc cortex_x925_core_pwr_dwn
 
-errata_report_shim cortex_x925
-
 	/* ---------------------------------------------
 	 * This function provides Cortex-X925 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
index 884281d..ca250d3 100644
--- a/lib/cpus/aarch64/denver.S
+++ b/lib/cpus/aarch64/denver.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
  * Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -296,8 +296,6 @@
 	ret
 endfunc denver_cluster_pwr_dwn
 
-errata_report_shim denver
-
 	/* ---------------------------------------------
 	 * This function provides Denver specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/generic.S b/lib/cpus/aarch64/generic.S
index ef1f048..5d7a857 100644
--- a/lib/cpus/aarch64/generic.S
+++ b/lib/cpus/aarch64/generic.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -79,7 +79,6 @@
  * Unimplemented functions.
  * ---------------------------------------------
  */
-.equ	generic_errata_report,		0
 .equ	generic_cpu_reg_dump,		0
 .equ	generic_reset_func,		0
 
diff --git a/lib/cpus/aarch64/neoverse_e1.S b/lib/cpus/aarch64/neoverse_e1.S
index 45bd8d3..4bc95d0 100644
--- a/lib/cpus/aarch64/neoverse_e1.S
+++ b/lib/cpus/aarch64/neoverse_e1.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -42,8 +42,6 @@
 	ret
 endfunc neoverse_e1_cpu_pwr_dwn
 
-errata_report_shim neoverse_e1
-
 .section .rodata.neoverse_e1_regs, "aS"
 neoverse_e1_regs:  /* The ascii list of register names to be reported */
 	.asciz	"cpuectlr_el1", ""
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index 36a7ee7..50e1ae3 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -242,8 +242,6 @@
 	ret
 endfunc neoverse_n1_core_pwr_dwn
 
-errata_report_shim neoverse_n1
-
 /*
  * Handle trap of EL0 IC IVAU instructions to EL3 by executing a TLB
  * inner-shareable invalidation to an arbitrary address followed by a DSB.
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
index a85d956..7d7cc44 100644
--- a/lib/cpus/aarch64/neoverse_n2.S
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -282,8 +282,6 @@
 	ret
 endfunc neoverse_n2_core_pwr_dwn
 
-errata_report_shim neoverse_n2
-
 	/* ---------------------------------------------
 	 * This function provides Neoverse N2 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/neoverse_n3.S b/lib/cpus/aarch64/neoverse_n3.S
index 0b33b7e..d96c9d4 100644
--- a/lib/cpus/aarch64/neoverse_n3.S
+++ b/lib/cpus/aarch64/neoverse_n3.S
@@ -45,8 +45,6 @@
 	ret
 endfunc neoverse_n3_core_pwr_dwn
 
-errata_report_shim neoverse_n3
-
 	/* ---------------------------------------------
 	 * This function provides Neoverse-N3 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S
index c2fbb11..89299b7 100644
--- a/lib/cpus/aarch64/neoverse_v1.S
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -259,8 +259,6 @@
 	ret
 endfunc neoverse_v1_core_pwr_dwn
 
-errata_report_shim neoverse_v1
-
 cpu_reset_func_start neoverse_v1
 	/* Disable speculative loads */
 	msr	SSBS, xzr
diff --git a/lib/cpus/aarch64/neoverse_v2.S b/lib/cpus/aarch64/neoverse_v2.S
index 3179918..d8c32a4 100644
--- a/lib/cpus/aarch64/neoverse_v2.S
+++ b/lib/cpus/aarch64/neoverse_v2.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -116,7 +116,6 @@
 #endif
 cpu_reset_func_end neoverse_v2
 
-errata_report_shim neoverse_v2
 	/* ---------------------------------------------
 	 * This function provides Neoverse V2-
 	 * specific register information for crash
diff --git a/lib/cpus/aarch64/neoverse_v3.S b/lib/cpus/aarch64/neoverse_v3.S
index 67258c8..01ac38f 100644
--- a/lib/cpus/aarch64/neoverse_v3.S
+++ b/lib/cpus/aarch64/neoverse_v3.S
@@ -60,8 +60,6 @@
 	msr	SSBS, xzr
 cpu_reset_func_end neoverse_v3
 
-errata_report_shim neoverse_v3
-
 	/* ---------------------------------------------
 	 * This function provides Neoverse V3 specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/nevis.S b/lib/cpus/aarch64/nevis.S
index 36830a9..0180ab7 100644
--- a/lib/cpus/aarch64/nevis.S
+++ b/lib/cpus/aarch64/nevis.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -40,8 +40,6 @@
 	ret
 endfunc nevis_core_pwr_dwn
 
-errata_report_shim nevis
-
 .section .rodata.nevis_regs, "aS"
 nevis_regs: /* The ASCII list of register names to be reported */
 	.asciz	"cpuectlr_el1", ""
diff --git a/lib/cpus/aarch64/qemu_max.S b/lib/cpus/aarch64/qemu_max.S
index 00963bc..fb03cf1 100644
--- a/lib/cpus/aarch64/qemu_max.S
+++ b/lib/cpus/aarch64/qemu_max.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -47,8 +47,6 @@
 	b	dcsw_op_all
 endfunc qemu_max_cluster_pwr_dwn
 
-errata_report_shim qemu_max
-
 	/* ---------------------------------------------
 	 * This function provides cpu specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/rainier.S b/lib/cpus/aarch64/rainier.S
index c770f54..ea687be 100644
--- a/lib/cpus/aarch64/rainier.S
+++ b/lib/cpus/aarch64/rainier.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -80,8 +80,6 @@
 	ret
 endfunc rainier_core_pwr_dwn
 
-errata_report_shim rainier
-
 	/* ---------------------------------------------
 	 * This function provides Rainier specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/aarch64/travis.S b/lib/cpus/aarch64/travis.S
index ba06f55..e8b3860 100644
--- a/lib/cpus/aarch64/travis.S
+++ b/lib/cpus/aarch64/travis.S
@@ -54,8 +54,6 @@
 	ret
 endfunc travis_core_pwr_dwn
 
-errata_report_shim travis
-
 .section .rodata.travis_regs, "aS"
 travis_regs: /* The ASCII list of register names to be reported */
 	.asciz	"cpuectlr_el1", ""
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index f736b5a..c55597a 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -823,6 +823,10 @@
 # cpu and is fixed in r0p1.
 CPU_FLAG_LIST += ERRATA_X4_2701112
 
+# Flag to apply erratum 2726228 workaround during warmboot. This erratum
+# applies to all revisions <= r0p1 of the Cortex-X4 cpu, it is fixed in r0p2.
+CPU_FLAG_LIST += ERRATA_X4_2726228
+
 # Flag to apply erratum 2740089 workaround during powerdown. This erratum
 # applies to all revisions <= r0p1 of the Cortex-X4 cpu, it is fixed in r0p2.
 CPU_FLAG_LIST += ERRATA_X4_2740089
@@ -831,6 +835,10 @@
 # to revisions r0p0 and r0p1 of the Cortex-X4 cpu. It is fixed in r0p2.
 CPU_FLAG_LIST += ERRATA_X4_2763018
 
+# Flag to apply erratum 2816013 workaround on reset. This erratum applies
+# to revisions r0p0 and r0p1 of the Cortex-X4 cpu. It is fixed in r0p2.
+CPU_FLAG_LIST += ERRATA_X4_2816013
+
 # Flag to apply erratum 1922240 workaround during reset. This erratum applies
 # to revision r0p0 of the Cortex-A510 cpu and is fixed in r0p1.
 CPU_FLAG_LIST += ERRATA_A510_1922240
@@ -892,6 +900,10 @@
 # applies to revision r0p0 and r0p1 of the Cortex-A520 cpu and is still open.
 CPU_FLAG_LIST += ERRATA_A520_2858100
 
+# Flag to apply erratum 2938996 workaround during reset. This erratum
+# applies to revision r0p0 and r0p1 of the Cortex-A520 cpu and is fixed in r0p2.
+CPU_FLAG_LIST += ERRATA_A520_2938996
+
 # Flag to apply erratum 2331132 workaround during reset. This erratum applies
 # to revisions r0p0, r0p1 and r0p2. It is still open.
 CPU_FLAG_LIST += ERRATA_V2_2331132
@@ -952,6 +964,14 @@
 # only to revision r0p0, r1p0 and r1p1. It is fixed in r1p2.
 CPU_FLAG_LIST += ERRATA_A715_2728106
 
+# Flag to apply erratum 2792132 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1. It is fixed in r0p2.
+CPU_FLAG_LIST += ERRATA_A720_2792132
+
+# Flag to apply erratum 2844092 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1. It is fixed in r0p2.
+CPU_FLAG_LIST += ERRATA_A720_2844092
+
 # Flag to apply erratum 2926083 workaround during reset. This erratum applies
 # to revisions r0p0 and r0p1. It is fixed in r0p2.
 CPU_FLAG_LIST += ERRATA_A720_2926083
diff --git a/lib/cpus/errata_common.c b/lib/cpus/errata_common.c
new file mode 100644
index 0000000..9801245
--- /dev/null
+++ b/lib/cpus/errata_common.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* Runtime C routines for errata workarounds and common routines */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <cortex_a520.h>
+#include <cortex_x4.h>
+#include <lib/cpus/cpu_ops.h>
+#include <lib/cpus/errata.h>
+
+#if ERRATA_A520_2938996 || ERRATA_X4_2726228
+unsigned int check_if_affected_core(void)
+{
+	uint32_t midr_val = read_midr();
+	long rev_var  = cpu_get_rev_var();
+
+	if (EXTRACT_PARTNUM(midr_val) == EXTRACT_PARTNUM(CORTEX_A520_MIDR)) {
+		return check_erratum_cortex_a520_2938996(rev_var);
+	} else if (EXTRACT_PARTNUM(midr_val) == EXTRACT_PARTNUM(CORTEX_X4_MIDR)) {
+		return check_erratum_cortex_x4_2726228(rev_var);
+	}
+
+	return ERRATA_NOT_APPLIES;
+}
+#endif
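
The helper above keys off the MIDR_EL1 part-number field to decide which per-CPU erratum check to run; its only consumer in this patch is the asymmetric-feature handling added to context_mgmt.c further down. As a standalone sketch of the same dispatch pattern (PartNum is MIDR_EL1[15:4] per the Arm ARM; the part numbers and helper names below are illustrative assumptions, not TF-A definitions):

#include <stdbool.h>
#include <stdint.h>

#define MIDR_PN_SHIFT	4U
#define MIDR_PN_MASK	0xfffU
#define PARTNUM(midr)	(((midr) >> MIDR_PN_SHIFT) & MIDR_PN_MASK)

/* Part numbers below are assumed for illustration only. */
#define PART_CORTEX_A520	0xd80U
#define PART_CORTEX_X4		0xd82U

/* Pick the erratum check that matches the running core's part number. */
static bool core_has_trbe_erratum(uint32_t midr, bool a520_2938996_applies,
				  bool x4_2726228_applies)
{
	switch (PARTNUM(midr)) {
	case PART_CORTEX_A520:
		return a520_2938996_applies;
	case PART_CORTEX_X4:
		return x4_2726228_applies;
	default:
		return false;
	}
}
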
diff --git a/lib/cpus/errata_report.c b/lib/cpus/errata_report.c
index 27cfc91..e0a9076 100644
--- a/lib/cpus/errata_report.c
+++ b/lib/cpus/errata_report.c
@@ -67,7 +67,7 @@
  * save space. This functionality is only useful on development and platform
  * bringup builds, when FEATURE_DETECTION should be used anyway
  */
-void __unused generic_errata_report(void)
+void generic_errata_report(void)
 {
 	struct cpu_ops *cpu_ops = get_cpu_ops_ptr();
 	struct erratum_entry *entry = cpu_ops->errata_list_start;
@@ -159,70 +159,16 @@
  */
 void print_errata_status(void)
 {
-	struct cpu_ops *cpu_ops;
 #ifdef IMAGE_BL1
-	/*
-	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
-	 * directly.
-	 */
-	cpu_ops = get_cpu_ops_ptr();
-
-	if (cpu_ops->errata_func != NULL) {
-		cpu_ops->errata_func();
-	}
+	generic_errata_report();
 #else /* IMAGE_BL1 */
-	cpu_ops = (void *) get_cpu_data(cpu_ops_ptr);
+	struct cpu_ops *cpu_ops = (void *) get_cpu_data(cpu_ops_ptr);
 
 	assert(cpu_ops != NULL);
 
-	if (cpu_ops->errata_func == NULL) {
-		return;
-	}
-
 	if (errata_needs_reporting(cpu_ops->errata_lock, cpu_ops->errata_reported)) {
-		cpu_ops->errata_func();
+		generic_errata_report();
 	}
 #endif /* IMAGE_BL1 */
 }
-
-/*
- * Old errata status message printer
- * TODO: remove once all cpus have been converted to the new printing method
- */
-void __unused errata_print_msg(unsigned int status, const char *cpu, const char *id)
-{
-	/* Errata status strings */
-	static const char *const errata_status_str[] = {
-		[ERRATA_NOT_APPLIES] = "not applied",
-		[ERRATA_APPLIES] = "applied",
-		[ERRATA_MISSING] = "missing!"
-	};
-	static const char *const __unused bl_str = BL_STRING;
-	const char *msg __unused;
-
-
-	assert(status < ARRAY_SIZE(errata_status_str));
-	assert(cpu != NULL);
-	assert(id != NULL);
-
-	msg = errata_status_str[status];
-
-	switch (status) {
-	case ERRATA_NOT_APPLIES:
-		VERBOSE(ERRATA_FORMAT, bl_str, cpu, id, msg);
-		break;
-
-	case ERRATA_APPLIES:
-		INFO(ERRATA_FORMAT, bl_str, cpu, id, msg);
-		break;
-
-	case ERRATA_MISSING:
-		WARN(ERRATA_FORMAT, bl_str, cpu, id, msg);
-		break;
-
-	default:
-		WARN(ERRATA_FORMAT, bl_str, cpu, id, "unknown");
-		break;
-	}
-}
 #endif /* !REPORT_ERRATA */
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 62895ff..ab9d4b6 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -9,12 +9,18 @@
 #include <assert_macros.S>
 #include <context.h>
 #include <el3_common_macros.S>
+#include <platform_def.h>
 
 #if CTX_INCLUDE_FPREGS
 	.global	fpregs_context_save
 	.global	fpregs_context_restore
 #endif /* CTX_INCLUDE_FPREGS */
 
+#if CTX_INCLUDE_SVE_REGS
+	.global sve_context_save
+	.global sve_context_restore
+#endif /* CTX_INCLUDE_SVE_REGS */
+
 #if ERRATA_SPECULATIVE_AT
 	.global save_and_update_ptw_el1_sys_regs
 #endif /* ERRATA_SPECULATIVE_AT */
@@ -23,6 +29,36 @@
 	.global	restore_gp_pmcr_pauth_regs
 	.global	el3_exit
 
+/* The following macros are used if either CTX_INCLUDE_FPREGS or CTX_INCLUDE_SVE_REGS is enabled */
+#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
+.macro fpregs_state_save base:req hold:req
+	mrs	\hold, fpsr
+	str	\hold, [\base, #CTX_SIMD_FPSR]
+
+	mrs	\hold, fpcr
+	str	\hold, [\base, #CTX_SIMD_FPCR]
+
+#if CTX_INCLUDE_AARCH32_REGS && CTX_INCLUDE_FPREGS
+	mrs	\hold, fpexc32_el2
+	str	\hold, [\base, #CTX_SIMD_FPEXC32]
+#endif
+.endm
+
+.macro fpregs_state_restore base:req hold:req
+	ldr	\hold, [\base, #CTX_SIMD_FPSR]
+	msr	fpsr, \hold
+
+	ldr	\hold, [\base, #CTX_SIMD_FPCR]
+	msr	fpcr, \hold
+
+#if CTX_INCLUDE_AARCH32_REGS && CTX_INCLUDE_FPREGS
+	ldr	\hold, [\base, #CTX_SIMD_FPEXC32]
+	msr	fpexc32_el2, \hold
+#endif
+.endm
+
+#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */
+
 /* ------------------------------------------------------------------
  * The following function follows the aapcs_64 strictly to use
  * x9-x17 (temporary caller-saved registers according to AArch64 PCS)
@@ -39,33 +75,25 @@
  */
 #if CTX_INCLUDE_FPREGS
 func fpregs_context_save
-	stp	q0, q1, [x0, #CTX_FP_Q0]
-	stp	q2, q3, [x0, #CTX_FP_Q2]
-	stp	q4, q5, [x0, #CTX_FP_Q4]
-	stp	q6, q7, [x0, #CTX_FP_Q6]
-	stp	q8, q9, [x0, #CTX_FP_Q8]
-	stp	q10, q11, [x0, #CTX_FP_Q10]
-	stp	q12, q13, [x0, #CTX_FP_Q12]
-	stp	q14, q15, [x0, #CTX_FP_Q14]
-	stp	q16, q17, [x0, #CTX_FP_Q16]
-	stp	q18, q19, [x0, #CTX_FP_Q18]
-	stp	q20, q21, [x0, #CTX_FP_Q20]
-	stp	q22, q23, [x0, #CTX_FP_Q22]
-	stp	q24, q25, [x0, #CTX_FP_Q24]
-	stp	q26, q27, [x0, #CTX_FP_Q26]
-	stp	q28, q29, [x0, #CTX_FP_Q28]
-	stp	q30, q31, [x0, #CTX_FP_Q30]
+	stp	q0, q1, [x0], #32
+	stp	q2, q3, [x0], #32
+	stp	q4, q5, [x0], #32
+	stp	q6, q7, [x0], #32
+	stp	q8, q9, [x0], #32
+	stp	q10, q11, [x0], #32
+	stp	q12, q13, [x0], #32
+	stp	q14, q15, [x0], #32
+	stp	q16, q17, [x0], #32
+	stp	q18, q19, [x0], #32
+	stp	q20, q21, [x0], #32
+	stp	q22, q23, [x0], #32
+	stp	q24, q25, [x0], #32
+	stp	q26, q27, [x0], #32
+	stp	q28, q29, [x0], #32
+	stp	q30, q31, [x0], #32
 
-	mrs	x9, fpsr
-	str	x9, [x0, #CTX_FP_FPSR]
+	fpregs_state_save x0, x9
 
-	mrs	x10, fpcr
-	str	x10, [x0, #CTX_FP_FPCR]
-
-#if CTX_INCLUDE_AARCH32_REGS
-	mrs	x11, fpexc32_el2
-	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
-#endif /* CTX_INCLUDE_AARCH32_REGS */
 	ret
 endfunc fpregs_context_save
 
@@ -84,51 +112,196 @@
  * ------------------------------------------------------------------
  */
 func fpregs_context_restore
-	ldp	q0, q1, [x0, #CTX_FP_Q0]
-	ldp	q2, q3, [x0, #CTX_FP_Q2]
-	ldp	q4, q5, [x0, #CTX_FP_Q4]
-	ldp	q6, q7, [x0, #CTX_FP_Q6]
-	ldp	q8, q9, [x0, #CTX_FP_Q8]
-	ldp	q10, q11, [x0, #CTX_FP_Q10]
-	ldp	q12, q13, [x0, #CTX_FP_Q12]
-	ldp	q14, q15, [x0, #CTX_FP_Q14]
-	ldp	q16, q17, [x0, #CTX_FP_Q16]
-	ldp	q18, q19, [x0, #CTX_FP_Q18]
-	ldp	q20, q21, [x0, #CTX_FP_Q20]
-	ldp	q22, q23, [x0, #CTX_FP_Q22]
-	ldp	q24, q25, [x0, #CTX_FP_Q24]
-	ldp	q26, q27, [x0, #CTX_FP_Q26]
-	ldp	q28, q29, [x0, #CTX_FP_Q28]
-	ldp	q30, q31, [x0, #CTX_FP_Q30]
+	ldp	q0, q1, [x0], #32
+	ldp	q2, q3, [x0], #32
+	ldp	q4, q5, [x0], #32
+	ldp	q6, q7, [x0], #32
+	ldp	q8, q9, [x0], #32
+	ldp	q10, q11, [x0], #32
+	ldp	q12, q13, [x0], #32
+	ldp	q14, q15, [x0], #32
+	ldp	q16, q17, [x0], #32
+	ldp	q18, q19, [x0], #32
+	ldp	q20, q21, [x0], #32
+	ldp	q22, q23, [x0], #32
+	ldp	q24, q25, [x0], #32
+	ldp	q26, q27, [x0], #32
+	ldp	q28, q29, [x0], #32
+	ldp	q30, q31, [x0], #32
 
-	ldr	x9, [x0, #CTX_FP_FPSR]
-	msr	fpsr, x9
+	fpregs_state_restore x0, x9
 
-	ldr	x10, [x0, #CTX_FP_FPCR]
-	msr	fpcr, x10
+	ret
+endfunc fpregs_context_restore
+#endif /* CTX_INCLUDE_FPREGS */
+
+#if CTX_INCLUDE_SVE_REGS
+/*
+ * Helper macros for SVE predicate and vector register save/restore operations.
+ */
+.macro sve_predicate_op op:req reg:req
+	\op p0, [\reg, #0, MUL VL]
+	\op p1, [\reg, #1, MUL VL]
+	\op p2, [\reg, #2, MUL VL]
+	\op p3, [\reg, #3, MUL VL]
+	\op p4, [\reg, #4, MUL VL]
+	\op p5, [\reg, #5, MUL VL]
+	\op p6, [\reg, #6, MUL VL]
+	\op p7, [\reg, #7, MUL VL]
+	\op p8, [\reg, #8, MUL VL]
+	\op p9, [\reg, #9, MUL VL]
+	\op p10, [\reg, #10, MUL VL]
+	\op p11, [\reg, #11, MUL VL]
+	\op p12, [\reg, #12, MUL VL]
+	\op p13, [\reg, #13, MUL VL]
+	\op p14, [\reg, #14, MUL VL]
+	\op p15, [\reg, #15, MUL VL]
+.endm
 
-#if CTX_INCLUDE_AARCH32_REGS
-	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
-	msr	fpexc32_el2, x11
-#endif /* CTX_INCLUDE_AARCH32_REGS */
+.macro sve_vectors_op op:req reg:req
+	\op z0, [\reg, #0, MUL VL]
+	\op z1, [\reg, #1, MUL VL]
+	\op z2, [\reg, #2, MUL VL]
+	\op z3, [\reg, #3, MUL VL]
+	\op z4, [\reg, #4, MUL VL]
+	\op z5, [\reg, #5, MUL VL]
+	\op z6, [\reg, #6, MUL VL]
+	\op z7, [\reg, #7, MUL VL]
+	\op z8, [\reg, #8, MUL VL]
+	\op z9, [\reg, #9, MUL VL]
+	\op z10, [\reg, #10, MUL VL]
+	\op z11, [\reg, #11, MUL VL]
+	\op z12, [\reg, #12, MUL VL]
+	\op z13, [\reg, #13, MUL VL]
+	\op z14, [\reg, #14, MUL VL]
+	\op z15, [\reg, #15, MUL VL]
+	\op z16, [\reg, #16, MUL VL]
+	\op z17, [\reg, #17, MUL VL]
+	\op z18, [\reg, #18, MUL VL]
+	\op z19, [\reg, #19, MUL VL]
+	\op z20, [\reg, #20, MUL VL]
+	\op z21, [\reg, #21, MUL VL]
+	\op z22, [\reg, #22, MUL VL]
+	\op z23, [\reg, #23, MUL VL]
+	\op z24, [\reg, #24, MUL VL]
+	\op z25, [\reg, #25, MUL VL]
+	\op z26, [\reg, #26, MUL VL]
+	\op z27, [\reg, #27, MUL VL]
+	\op z28, [\reg, #28, MUL VL]
+	\op z29, [\reg, #29, MUL VL]
+	\op z30, [\reg, #30, MUL VL]
+	\op z31, [\reg, #31, MUL VL]
+.endm
 
-	/*
-	 * No explict ISB required here as ERET to
-	 * switch to secure EL1 or non-secure world
-	 * covers it
-	 */
+/* ------------------------------------------------------------------
+ * The following function follows the aapcs_64 strictly to use x9-x17
+ * (temporary caller-saved registers according to AArch64 PCS) to
+ * save SVE register context. It assumes that 'x0' is
+ * pointing to a 'sve_regs_t' structure to which the register context
+ * will be saved.
+ * ------------------------------------------------------------------
+ */
+func sve_context_save
+.arch_extension sve
+	/* Temporarily enable SVE */
+	mrs	x10, cptr_el3
+	orr	x11, x10, #CPTR_EZ_BIT
+	bic	x11, x11, #TFP_BIT
+	msr	cptr_el3, x11
+	isb
+
+	/* zcr_el3 */
+	mrs	x12, S3_6_C1_C2_0
+	mov	x13, #((SVE_VECTOR_LEN >> 7) - 1)
+	msr	S3_6_C1_C2_0, x13
+	isb
+
+	/* Predicate registers */
+	mov x13, #CTX_SIMD_PREDICATES
+	add	x9, x0, x13
+	sve_predicate_op str, x9
+
+	/* Save FFR after predicates */
+	mov x13, #CTX_SIMD_FFR
+	add	x9, x0, x13
+	rdffr   p0.b
+	str	p0, [x9]
+
+	/* Save vector registers */
+	mov x13, #CTX_SIMD_VECTORS
+	add	x9, x0, x13
+	sve_vectors_op  str, x9
+
+	/* Restore SVE enablement */
+	msr	S3_6_C1_C2_0, x12 /* zcr_el3 */
+	msr	cptr_el3, x10
+	isb
+.arch_extension nosve
+
+	/* Save FPSR, FPCR and FPEXC32 */
+	fpregs_state_save x0, x9
 
 	ret
-endfunc fpregs_context_restore
-#endif /* CTX_INCLUDE_FPREGS */
+endfunc sve_context_save
+
+/* ------------------------------------------------------------------
+ * The following function follows the aapcs_64 strictly to use x9-x17
+ * (temporary caller-saved registers according to AArch64 PCS) to
+ * restore SVE register context. It assumes that 'x0' is pointing to
+ * a 'sve_regs_t' structure from where the register context will be
+ * restored.
+ * ------------------------------------------------------------------
+ */
+func sve_context_restore
+.arch_extension sve
+	/* Temporarily enable SVE for EL3 */
+	mrs	x10, cptr_el3
+	orr	x11, x10, #CPTR_EZ_BIT
+	bic	x11, x11, #TFP_BIT
+	msr	cptr_el3, x11
+	isb
+
+	/* zcr_el3 */
+	mrs	x12, S3_6_C1_C2_0
+	mov	x13, #((SVE_VECTOR_LEN >> 7) - 1)
+	msr	S3_6_C1_C2_0, x13
+	isb
+
+	/* Restore FFR register before predicates */
+	mov x13, #CTX_SIMD_FFR
+	add	x9, x0, x13
+	ldr	p0, [x9]
+	wrffr	p0.b
+
+	/* Restore predicate registers */
+	mov x13, #CTX_SIMD_PREDICATES
+	add	x9, x0, x13
+	sve_predicate_op ldr, x9
+
+	/* Restore vector registers */
+	mov x13, #CTX_SIMD_VECTORS
+	add	x9, x0, x13
+	sve_vectors_op	ldr, x9
+
+	/* Restore SVE enablement */
+	msr	S3_6_C1_C2_0, x12 /* zcr_el3 */
+	msr	cptr_el3, x10
+	isb
+.arch_extension nosve
+
+	/* Restore FPSR, FPCR and FPEXC32 */
+	fpregs_state_restore x0, x9
+	ret
+endfunc sve_context_restore
+#endif /* CTX_INCLUDE_SVE_REGS */
 
 	/*
 	 * Set SCR_EL3.EA bit to enable SErrors at EL3
 	 */
 	.macro enable_serror_at_el3
-	mrs     x8, scr_el3
-	orr     x8, x8, #SCR_EA_BIT
-	msr     scr_el3, x8
+	mrs	x8, scr_el3
+	orr	x8, x8, #SCR_EA_BIT
+	msr	scr_el3, x8
 	.endm
 
 	/*
@@ -142,13 +315,13 @@
 	 * always enable DIT in EL3
 	 */
 #if ENABLE_FEAT_DIT
-#if ENABLE_FEAT_DIT == 2
+#if ENABLE_FEAT_DIT >= 2
 	mrs	x8, id_aa64pfr0_el1
 	and	x8, x8, #(ID_AA64PFR0_DIT_MASK << ID_AA64PFR0_DIT_SHIFT)
 	cbz	x8, 1f
 #endif
-	mov     x8, #DIT_BIT
-	msr     DIT, x8
+	mov	x8, #DIT_BIT
+	msr	DIT, x8
 1:
 #endif /* ENABLE_FEAT_DIT */
 	.endm /* set_unset_pstate_bits */
@@ -166,8 +339,7 @@
 
 	.macro	restore_mpam3_el3
 #if ENABLE_FEAT_MPAM
-#if ENABLE_FEAT_MPAM == 2
-
+#if ENABLE_FEAT_MPAM >= 2
 	mrs x8, id_aa64pfr0_el1
 	lsr x8, x8, #(ID_AA64PFR0_MPAM_SHIFT)
 	and x8, x8, #(ID_AA64PFR0_MPAM_MASK)
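
The new save/restore paths above address FPSR/FPCR, the Q/Z vectors, the predicates and FFR through CTX_SIMD_* offsets defined in the context headers, which are not part of this hunk. As a rough orientation only, the layout those offsets imply could be pictured as in the C sketch below; the field names, ordering and example vector length are assumptions, not the actual TF-A definition:

#include <stdbool.h>
#include <stdint.h>

#define SVE_VECTOR_LEN	512	/* example only; TF-A sets this at build time */

/* Illustrative layout only -- the real simd_regs_t lives in the context headers. */
typedef struct simd_regs {
	uint8_t vectors[32][SVE_VECTOR_LEN / 8];	/* Z0-Z31 (Q0-Q31 when FP-only) */
	uint8_t predicates[16][SVE_VECTOR_LEN / 64];	/* P0-P15 (SVE builds only) */
	uint8_t ffr[SVE_VECTOR_LEN / 64];		/* first-fault register (SVE builds only) */
	uint64_t fpsr;
	uint64_t fpcr;
	uint64_t fpexc32_el2;	/* only saved with AArch32 FP context */
	bool hint;		/* set when the saved state is FP-only (no live SVE) */
} simd_regs_t;
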
diff --git a/lib/el3_runtime/aarch64/context_debug.c b/lib/el3_runtime/aarch64/context_debug.c
index 9ffa297..b37bcb7 100644
--- a/lib/el3_runtime/aarch64/context_debug.c
+++ b/lib/el3_runtime/aarch64/context_debug.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2023-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -28,19 +28,11 @@
 	return state_names[security_state_idx];
 }
 
-#if CTX_INCLUDE_EL2_REGS
 #define PRINT_MEM_USAGE_SEPARATOR()					\
 	do {								\
 		printf("+-----------+-----------+-----------"		\
-			"+-----------+-----------+-----------+\n");	\
-	} while (false)
-#else
-#define PRINT_MEM_USAGE_SEPARATOR()					\
-	do {								\
-		printf("+-----------+-----------"			\
 		"+-----------+-----------+-----------+\n");		\
 	} while (false)
-#endif /* CTX_INCLUDE_EL2_REGS */
 
 #define NAME_PLACEHOLDER_LEN 14
 
@@ -49,6 +41,11 @@
 		putchar('-');						\
 	}
 
+#define PRINT_SINGLE_MEM_USAGE_SEP_BLOCK()				\
+	do {								\
+		printf("+-----------");					\
+	} while (false)
+
 /********************************************************************************
  * This function prints the allocated memory for a specific security state.
  * Values are grouped by exception level and core. The memory usage for the
@@ -57,64 +54,119 @@
 static size_t report_allocated_memory(unsigned int security_state_idx)
 {
 	size_t core_total = 0U;
+	size_t gp_total = 0U;
 	size_t el3_total = 0U;
-#if CTX_INCLUDE_EL2_REGS
-	size_t el2_total = 0U;
-#endif /* CTX_INCLUDE_EL2_REGS */
-	size_t el1_total = 0U;
 	size_t other_total = 0U;
 	size_t total = 0U;
 	size_t per_world_ctx_size = 0U;
 
+#if CTX_INCLUDE_EL2_REGS
+	size_t el2_total = 0U;
+#else
+	size_t el1_total = 0U;
+#endif /* CTX_INCLUDE_EL2_REGS */
+
+#if CTX_INCLUDE_PAUTH_REGS
+	size_t pauth_total = 0U;
+	PRINT_SINGLE_MEM_USAGE_SEP_BLOCK();
+#endif
+
 	PRINT_MEM_USAGE_SEPARATOR();
-	printf("|    Core   |    EL3    ");
+
+	printf("|    Core   |     GP    |    EL3    ");
 #if CTX_INCLUDE_EL2_REGS
 	printf("|    EL2    ");
+#else
+	printf("|    EL1    ");
 #endif /* CTX_INCLUDE_EL2_REGS */
-	printf("|    EL1    |   Other   |   Total   |\n");
+
+#if CTX_INCLUDE_PAUTH_REGS
+	printf("|   PAUTH   ");
+#endif
+
+	printf("|   Other   |   Total   |\n");
 
 	/* Compute memory usage for each core's context */
 	for (unsigned int i = 0U; i < PLATFORM_CORE_COUNT; i++) {
 		size_t size_other = 0U;
 		size_t el3_size = 0U;
+		size_t gp_size = 0U;
 #if CTX_INCLUDE_EL2_REGS
 		size_t el2_size = 0U;
-#endif /* CTX_INCLUDE_EL2_REGS */
+#else
 		size_t el1_size = 0U;
+#endif /* CTX_INCLUDE_EL2_REGS */
+
+#if CTX_INCLUDE_PAUTH_REGS
+		size_t pauth_size = 0U;
+		PRINT_SINGLE_MEM_USAGE_SEP_BLOCK();
+#endif
 
 		PRINT_MEM_USAGE_SEPARATOR();
+
 		cpu_context_t *ctx = (cpu_context_t *)cm_get_context_by_index(i,
 			security_state_idx);
 		core_total = sizeof(*ctx);
 		el3_size = sizeof(ctx->el3state_ctx);
-#if CTX_INCLUDE_EL2_REGS
-		el2_size = sizeof(ctx->el2_sysregs_ctx);
-#endif /* CTX_INCLUDE_EL2_REGS */
-		el1_size = sizeof(ctx->el1_sysregs_ctx);
+		gp_size = sizeof(ctx->gpregs_ctx);
+		size_other = core_total - (el3_size + gp_size);
+		printf("| %9u | %8luB | %8luB ", i, gp_size, el3_size);
 
-		size_other = core_total - el3_size - el1_size;
-		printf("| %9u | %8luB ", i, el3_size);
 #if CTX_INCLUDE_EL2_REGS
+		el2_size = sizeof(ctx->el2_sysregs_ctx);
 		size_other -= el2_size;
+		el2_total += el2_size;
 		printf("| %8luB ", el2_size);
+#else
+		el1_size = sizeof(ctx->el1_sysregs_ctx);
+		size_other -= el1_size;
+		el1_total += el1_size;
+		printf("| %8luB ", el1_size);
 #endif /* CTX_INCLUDE_EL2_REGS */
-		printf("| %8luB | %8luB | %8luB |\n", el1_size, size_other, core_total);
 
+#if CTX_INCLUDE_PAUTH_REGS
+		pauth_size = sizeof(ctx->pauth_ctx);
+		size_other -= pauth_size;
+		pauth_total += pauth_size;
+		printf("| %8luB ", pauth_size);
+#endif
+		printf("| %8luB | %8luB |\n", size_other, core_total);
+
+		gp_total += gp_size;
 		el3_total += el3_size;
-#if CTX_INCLUDE_EL2_REGS
-		el2_total += el2_size;
-#endif /* CTX_INCLUDE_EL2_REGS */
-		el1_total += el1_size;
 		other_total += size_other;
 		total += core_total;
 	}
+
+#if CTX_INCLUDE_PAUTH_REGS
+	PRINT_SINGLE_MEM_USAGE_SEP_BLOCK();
+#endif
+
 	PRINT_MEM_USAGE_SEPARATOR();
+
+#if CTX_INCLUDE_PAUTH_REGS
+	PRINT_SINGLE_MEM_USAGE_SEP_BLOCK();
+#endif
+
 	PRINT_MEM_USAGE_SEPARATOR();
-	printf("|    All    | %8luB ", el3_total);
+
+	printf("|    All    | %8luB | %8luB ", gp_total, el3_total);
+
 #if CTX_INCLUDE_EL2_REGS
 	printf("| %8luB ", el2_total);
+#else
+	printf("| %8luB ", el1_total);
 #endif /* CTX_INCLUDE_EL2_REGS */
-	printf("| %8luB | %8luB | %8luB |\n", el1_total, other_total, total);
+
+#if CTX_INCLUDE_PAUTH_REGS
+	printf("| %8luB ", pauth_total);
+#endif
+
+	printf("| %8luB | %8luB |\n", other_total, total);
+
+#if CTX_INCLUDE_PAUTH_REGS
+	PRINT_SINGLE_MEM_USAGE_SEP_BLOCK();
+#endif
 	PRINT_MEM_USAGE_SEPARATOR();
 	printf("\n");
 
@@ -146,18 +198,10 @@
 
 		printf("Memory usage for %s:\n", context_name);
 		total += report_allocated_memory(i);
-			printf("------------------------"
-#if CTX_INCLUDE_EL2_REGS
-				"------"
-#endif /* CTX_INCLUDE_EL2_REGS */
-			      );
+			printf("------------------------");
 			len = NAME_PLACEHOLDER_LEN - printf("End %s", context_name);
 			PRINT_DASH(len);
-			printf(
-#if CTX_INCLUDE_EL2_REGS
-				"------"
-#endif /* CTX_INCLUDE_EL2_REGS */
-				"-----------------------\n\n");
+			printf("-----------------------\n\n");
 	}
 
 	printf("Total context memory allocated: %luB\n\n", total);
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 15db9e5..6f3b51a 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -19,6 +19,8 @@
 #include <common/debug.h>
 #include <context.h>
 #include <drivers/arm/gicv3.h>
+#include <lib/cpus/cpu_ops.h>
+#include <lib/cpus/errata.h>
 #include <lib/el3_runtime/context_mgmt.h>
 #include <lib/el3_runtime/cpu_data.h>
 #include <lib/el3_runtime/pubsub_events.h>
@@ -49,6 +51,7 @@
 static void manage_extensions_secure(cpu_context_t *ctx);
 static void manage_extensions_secure_per_world(void);
 
+#if ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS)))
 static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
 {
 	u_register_t sctlr_elx, actlr_elx;
@@ -94,11 +97,7 @@
 #endif
 
 	/* Store the initialised SCTLR_EL1 value in the cpu_context */
-#if (ERRATA_SPECULATIVE_AT)
-	write_ctx_reg(get_errata_speculative_at_ctx(ctx), CTX_ERRATA_SPEC_AT_SCTLR_EL1, sctlr_elx);
-#else
-	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1, sctlr_elx);
-#endif /* ERRATA_SPECULATIVE_AT */
+	write_ctx_sctlr_el1_reg_errata(ctx, sctlr_elx);
 
 	/*
 	 * Base the context ACTLR_EL1 on the current value, as it is
@@ -110,6 +109,7 @@
 	actlr_elx = read_actlr_el1();
 	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), actlr_el1, actlr_elx);
 }
+#endif /* (IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS)) */
 
 /******************************************************************************
  * This function performs initializations that are specific to SECURE state
@@ -142,7 +142,7 @@
 	 * Initialize EL1 context registers unless SPMC is running
 	 * at S-EL2.
 	 */
-#if !SPMD_SPM_AT_SEL2
+#if (!SPMD_SPM_AT_SEL2)
 	setup_el1_context(ctx, ep);
 #endif
 
@@ -158,7 +158,6 @@
 	if (!has_secure_perworld_init) {
 		manage_extensions_secure_per_world();
 	}
-
 }
 
 #if ENABLE_RME
@@ -262,11 +261,8 @@
 #endif
 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
 
-	/* Initialize EL1 context registers */
-	setup_el1_context(ctx, ep);
-
 	/* Initialize EL2 context registers */
-#if CTX_INCLUDE_EL2_REGS
+#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
 
 	/*
 	 * Initialize SCTLR_EL2 context register with reset value.
@@ -299,8 +295,10 @@
 		write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgwtr_el2,
 			HFGWTR_EL2_INIT_VAL);
 	}
-
-#endif /* CTX_INCLUDE_EL2_REGS */
+#else
+	/* Initialize EL1 context registers */
+	setup_el1_context(ctx, ep);
+#endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
 
 	manage_extensions_nonsecure(ctx);
 }
@@ -331,7 +329,7 @@
 	 * to boot correctly. However, there are very few registers where this
 	 * is not true and some values need to be recreated.
 	 */
-#if CTX_INCLUDE_EL2_REGS
+#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
 	el2_sysregs_t *el2_ctx = get_el2_sysregs_ctx(ctx);
 
 	/*
@@ -347,7 +345,7 @@
 	 * and it may contain access control bits (e.g. CLUSTERPMUEN bit).
 	 */
 	write_el2_ctx_common(el2_ctx, actlr_el2, read_actlr_el2());
-#endif /* CTX_INCLUDE_EL2_REGS */
+#endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
 
 	/* Start with a clean SCR_EL3 copy as all relevant values are set */
 	scr_el3 = SCR_RESET_VAL;
@@ -1089,11 +1087,14 @@
 			}
 		}
 	}
+#if (!CTX_INCLUDE_EL2_REGS)
+	/* Restore EL1 system registers, only when CTX_INCLUDE_EL2_REGS=0 */
 	cm_el1_sysregs_context_restore(security_state);
+#endif
 	cm_set_next_eret_context(security_state);
 }
 
-#if CTX_INCLUDE_EL2_REGS
+#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
 
 static void el2_sysregs_context_save_fgt(el2_sysregs_t *ctx)
 {
@@ -1521,7 +1522,46 @@
 		write_gcspr_el2(read_el2_ctx_gcs(el2_sysregs_ctx, gcspr_el2));
 	}
 }
-#endif /* CTX_INCLUDE_EL2_REGS */
+#endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
+
+#if IMAGE_BL31
+/*********************************************************************************
+* This function allows for architectural feature asymmetry among cores.
+* TF-A assumes that all cores in the platform have architectural feature parity
+* and hence a core's context may be set up by a different core (e.g. the primary
+* sets up the context for secondary cores). This assumption may not hold on
+* systems where cores do not conform to the same Arch version, or where a CPU
+* erratum requires a certain feature to be disabled only on a given core.
+*
+* This function is called on secondary cores, during the warmboot path, to
+* override any disparity in the context set up by the primary.
+*********************************************************************************/
+void cm_handle_asymmetric_features(void)
+{
+#if ENABLE_SPE_FOR_NS == FEAT_STATE_CHECK_ASYMMETRIC
+	cpu_context_t *spe_ctx = cm_get_context(NON_SECURE);
+
+	assert(spe_ctx != NULL);
+
+	if (is_feat_spe_supported()) {
+		spe_enable(spe_ctx);
+	} else {
+		spe_disable(spe_ctx);
+	}
+#endif
+#if ERRATA_A520_2938996 || ERRATA_X4_2726228
+	cpu_context_t *trbe_ctx = cm_get_context(NON_SECURE);
+
+	assert(trbe_ctx != NULL);
+
+	if (check_if_affected_core() == ERRATA_APPLIES) {
+		if (is_feat_trbe_supported()) {
+			trbe_disable(trbe_ctx);
+		}
+	}
+#endif
+}
+#endif
 
 /*******************************************************************************
  * This function is used to exit to Non-secure world. If CTX_INCLUDE_EL2_REGS
@@ -1531,7 +1571,19 @@
  ******************************************************************************/
 void cm_prepare_el3_exit_ns(void)
 {
-#if CTX_INCLUDE_EL2_REGS
+#if IMAGE_BL31
+	/*
+	 * Check and handle architectural feature asymmetry among cores.
+	 *
+	 * In the warmboot path, a secondary core's context is initialized on the
+	 * core that issued the CPU_ON SMC call; if there is feature asymmetry
+	 * between these cores, it is handled in this function call.
+	 * For symmetric cores this function is empty.
+	 */
+	cm_handle_asymmetric_features();
+#endif
+
+#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
 #if ENABLE_ASSERTIONS
 	cpu_context_t *ctx = cm_get_context(NON_SECURE);
 	assert(ctx != NULL);
@@ -1542,15 +1594,19 @@
 			(el_implemented(2U) != EL_IMPL_NONE));
 #endif /* ENABLE_ASSERTIONS */
 
-	/* Restore EL2 and EL1 sysreg contexts */
+	/* Restore EL2 sysreg contexts */
 	cm_el2_sysregs_context_restore(NON_SECURE);
-	cm_el1_sysregs_context_restore(NON_SECURE);
 	cm_set_next_eret_context(NON_SECURE);
 #else
 	cm_prepare_el3_exit(NON_SECURE);
-#endif /* CTX_INCLUDE_EL2_REGS */
+#endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
 }
 
+#if ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS)))
+/*******************************************************************************
+ * The next six functions are used by runtime services to save and restore
+ * EL1 context on the 'cpu_context' structure for the specified security state.
+ ******************************************************************************/
 static void el1_sysregs_context_save(el1_sysregs_t *ctx)
 {
 	write_el1_ctx_common(ctx, spsr_el1, read_spsr_el1());
@@ -1742,9 +1798,8 @@
 }
 
 /*******************************************************************************
- * The next four functions are used by runtime services to save and restore
- * EL1 context on the 'cpu_context' structure for the specified security
- * state.
+ * The next couple of functions are used by runtime services to save and restore
+ * EL1 context on the 'cpu_context' structure for the specified security state.
  ******************************************************************************/
 void cm_el1_sysregs_context_save(uint32_t security_state)
 {
@@ -1780,6 +1835,8 @@
 #endif
 }
 
+#endif /* ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS))) */
+
 /*******************************************************************************
  * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
  * given security state with the given entrypoint
diff --git a/lib/el3_runtime/simd_ctx.c b/lib/el3_runtime/simd_ctx.c
new file mode 100644
index 0000000..f7a87df
--- /dev/null
+++ b/lib/el3_runtime/simd_ctx.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022, Google LLC. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+
+#include <common/debug.h>
+#include <lib/el3_runtime/aarch64/context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/el3_runtime/simd_ctx.h>
+#include <lib/extensions/sve.h>
+#include <plat/common/platform.h>
+
+#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
+
+/* SIMD context managed for Secure and Normal Worlds. */
+#define SIMD_CTXT_COUNT	2
+
+#if SEPARATE_SIMD_SECTION
+__section(".simd_context")
+#else
+__section(".bss.simd_context")
+#endif
+static simd_regs_t simd_context[SIMD_CTXT_COUNT][PLATFORM_CORE_COUNT];
+
+void simd_ctx_save(uint32_t security_state, bool hint_sve)
+{
+	simd_regs_t *regs;
+
+	if (security_state != NON_SECURE && security_state != SECURE) {
+		ERROR("Unsupported security state specified for SIMD context: %u\n",
+		      security_state);
+		panic();
+	}
+
+	regs = &simd_context[security_state][plat_my_core_pos()];
+
+#if CTX_INCLUDE_SVE_REGS
+	regs->hint = hint_sve;
+
+	if (hint_sve) {
+		/*
+		 * Hint bit denoting absence of SVE live state. Hence, only
+		 * save FP context.
+		 */
+		fpregs_context_save(regs);
+	} else {
+		sve_context_save(regs);
+	}
+#elif CTX_INCLUDE_FPREGS
+	fpregs_context_save(regs);
+#endif
+}
+
+void simd_ctx_restore(uint32_t security_state)
+{
+	simd_regs_t *regs;
+
+	if (security_state != NON_SECURE && security_state != SECURE) {
+		ERROR("Unsupported security state specified for SIMD context: %u\n",
+		      security_state);
+		panic();
+	}
+
+	regs = &simd_context[security_state][plat_my_core_pos()];
+
+#if CTX_INCLUDE_SVE_REGS
+	if (regs->hint) {
+		fpregs_context_restore(regs);
+	} else {
+		sve_context_restore(regs);
+	}
+#elif CTX_INCLUDE_FPREGS
+	fpregs_context_restore(regs);
+#endif
+}
+#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */
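
A minimal usage sketch of this new API, assuming a dispatcher saves the outgoing world's SIMD state and restores the incoming world's on a world switch; the wrapper function below and the source of the SVE hint are illustrative, not part of this patch:

#include <stdbool.h>
#include <stdint.h>

#include <lib/el3_runtime/simd_ctx.h>

/*
 * Illustrative world-switch helper (not part of this patch): save the
 * outgoing world's SIMD state, then restore the incoming world's.
 * 'sve_hint' would typically be derived from the SMCCC SVE-hint bit of
 * the incoming call.
 */
static void example_simd_world_switch(uint32_t from, uint32_t to, bool sve_hint)
{
	/* FP-only save when the caller guarantees there is no live SVE state. */
	simd_ctx_save(from, sve_hint);

	/* ... GP/system register context switching happens here as usual ... */

	simd_ctx_restore(to);
}

/* e.g. on a normal-to-secure world transition:
 *	example_simd_world_switch(NON_SECURE, SECURE, smc_sve_hint);
 */
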
diff --git a/lib/extensions/pmuv3/aarch64/pmuv3.c b/lib/extensions/pmuv3/aarch64/pmuv3.c
index 71aa303..f9e32ca 100644
--- a/lib/extensions/pmuv3/aarch64/pmuv3.c
+++ b/lib/extensions/pmuv3/aarch64/pmuv3.c
@@ -23,13 +23,13 @@
 
 void pmuv3_enable(cpu_context_t *ctx)
 {
-#if CTX_INCLUDE_EL2_REGS
+#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
 	u_register_t mdcr_el2_val;
 
 	mdcr_el2_val = read_el2_ctx_common(get_el2_sysregs_ctx(ctx), mdcr_el2);
 	mdcr_el2_val = init_mdcr_el2_hpmn(mdcr_el2_val);
 	write_el2_ctx_common(get_el2_sysregs_ctx(ctx), mdcr_el2, mdcr_el2_val);
-#endif /* CTX_INCLUDE_EL2_REGS */
+#endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
 }
 
 static u_register_t mtpmu_disable_el3(u_register_t mdcr_el3)
diff --git a/lib/romlib/Makefile b/lib/romlib/Makefile
index 9859ce1..29fbf78 100644
--- a/lib/romlib/Makefile
+++ b/lib/romlib/Makefile
@@ -45,7 +45,7 @@
 
 .PHONY: all clean distclean
 
-all: $(BUILD_DIR)/romlib.bin $(LIB_DIR)/libwrappers.a
+all: $(BUILD_DIR)/romlib.bin $(BUILD_DIR)/romlib.ldflags $(LIB_DIR)/libwrappers.a
 
 %.o: %.s | $$(@D)/
 	$(s)echo "  AS      $@"
@@ -89,6 +89,10 @@
 	$(s)echo "  TBL     $@"
 	$(q)$(ROMLIB_GEN) gentbl --output $@ --bti=$(ENABLE_BTI) $<
 
+$(BUILD_DIR)/romlib.ldflags: ../../$(PLAT_DIR)/jmptbl.i
+	$(s)echo "  LDFLAGS $@"
+	$(q)$(ROMLIB_GEN) link-flags $< > $@
+
 clean:
 	$(q)rm -f $(BUILD_DIR)/*
 
diff --git a/lib/romlib/romlib_generator.py b/lib/romlib/romlib_generator.py
index 0682dd4..8d2e88d 100755
--- a/lib/romlib/romlib_generator.py
+++ b/lib/romlib/romlib_generator.py
@@ -182,6 +182,22 @@
                 template_name = "jmptbl_entry_" + item["type"] + bti + ".S"
                 output_file.write(self.build_template(template_name, item, True))
 
+class LinkArgs(RomlibApplication):
+    """ Generates the link arguments to wrap functions. """
+
+    def __init__(self, prog):
+        RomlibApplication.__init__(self, prog)
+        self.args.add_argument("file", help="Input file")
+
+    def main(self):
+        index_file_parser = IndexFileParser()
+        index_file_parser.parse(self.config.file)
+
+        fns = [item["function_name"] for item in index_file_parser.items
+               if not item["patch"] and item["type"] != "reserved"]
+
+        print(" ".join("-Wl,--wrap " + f for f in fns))
+
 class WrapperGenerator(RomlibApplication):
     """
     Generates a wrapper function for each entry in the index file except for the ones that contain
@@ -214,21 +230,19 @@
             if item["type"] == "reserved" or item["patch"]:
                 continue
 
-            asm = self.config.b + "/" + item["function_name"] + ".s"
-            if self.config.list:
-                # Only listing files
-                files.append(asm)
-            else:
-                with open(asm, "w") as asm_file:
-                    # The jump instruction is 4 bytes but BTI requires and extra instruction so
-                    # this makes it 8 bytes per entry.
-                    function_offset = item_index * (8 if self.config.bti else 4)
+            if not self.config.list:
+                # The jump instruction is 4 bytes but BTI requires an extra instruction so
+                # this makes it 8 bytes per entry.
+                function_offset = item_index * (8 if self.config.bti else 4)
 
-                    item["function_offset"] = function_offset
-                    asm_file.write(self.build_template("wrapper" + bti + ".S", item))
+                item["function_offset"] = function_offset
+                files.append(self.build_template("wrapper" + bti + ".S", item))
 
         if self.config.list:
-            print(" ".join(files))
+            print(self.config.b + "/wrappers.s")
+        else:
+            with open(self.config.b + "/wrappers.s", "w") as asm_file:
+                asm_file.write("\n".join(files))
 
 class VariableGenerator(RomlibApplication):
     """ Generates the jump table global variable with the absolute address in ROM. """
@@ -258,7 +272,8 @@
 
 if __name__ == "__main__":
     APPS = {"genvar": VariableGenerator, "pre": IndexPreprocessor,
-            "gentbl": TableGenerator, "genwrappers": WrapperGenerator}
+            "gentbl": TableGenerator, "genwrappers": WrapperGenerator,
+            "link-flags": LinkArgs}
 
     if len(sys.argv) < 2 or sys.argv[1] not in APPS:
         print("usage: romlib_generator.py [%s] [args]" % "|".join(APPS.keys()), file=sys.stderr)
diff --git a/lib/romlib/templates/wrapper.S b/lib/romlib/templates/wrapper.S
index 734a68a..576474a 100644
--- a/lib/romlib/templates/wrapper.S
+++ b/lib/romlib/templates/wrapper.S
@@ -3,8 +3,9 @@
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
-	.globl	${function_name}
-${function_name}:
+	.section .text.__wrap_${function_name}
+	.globl	__wrap_${function_name}
+__wrap_${function_name}:
 	ldr	x17, =jmptbl
 	mov	x16, #${function_offset}
 	ldr	x17, [x17]
diff --git a/lib/romlib/templates/wrapper_bti.S b/lib/romlib/templates/wrapper_bti.S
index ba9b11c..0dc316c 100644
--- a/lib/romlib/templates/wrapper_bti.S
+++ b/lib/romlib/templates/wrapper_bti.S
@@ -3,8 +3,9 @@
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
-	.globl	${function_name}
-${function_name}:
+	.section .text.__wrap_${function_name}
+	.globl	__wrap_${function_name}
+__wrap_${function_name}:
 	bti	jc
 	ldr	x17, =jmptbl
 	mov	x16, #${function_offset}
diff --git a/make_helpers/build_macros.mk b/make_helpers/build_macros.mk
index 7050916..f523074 100644
--- a/make_helpers/build_macros.mk
+++ b/make_helpers/build_macros.mk
@@ -465,6 +465,10 @@
         $(patsubst %.S,$(BUILD_DIR)/%,$(1))
 endef
 
+ifeq ($(USE_ROMLIB),1)
+WRAPPER_FLAGS := @${BUILD_PLAT}/romlib/romlib.ldflags
+endif
+
 # MAKE_BL macro defines the targets and options to build each BL image.
 # Arguments:
 #   $(1) = BL stage
@@ -514,11 +518,11 @@
 		--map --list="$(MAPFILE)" --scatter=${PLAT_DIR}/scat/${1}.scat \
 		$(LDPATHS) $(LIBWRAPPER) $(LDLIBS) $(BL_LIBS) $(OBJS)
 else ifeq ($($(ARCH)-ld-id),gnu-gcc)
-	$$(q)$($(ARCH)-ld) -o $$@ $$(TF_LDFLAGS) $$(LDFLAGS) $(BL_LDFLAGS) -Wl,-Map=$(MAPFILE) \
+	$$(q)$($(ARCH)-ld) -o $$@ $$(TF_LDFLAGS) $$(LDFLAGS) $$(WRAPPER_FLAGS) $(BL_LDFLAGS) -Wl,-Map=$(MAPFILE) \
 		$(addprefix -Wl$(comma)--script$(comma),$(LINKER_SCRIPTS)) -Wl,--script,$(DEFAULT_LINKER_SCRIPT) \
 		$(OBJS) $(LDPATHS) $(LIBWRAPPER) $(LDLIBS) $(BL_LIBS)
 else
-	$$(q)$($(ARCH)-ld) -o $$@ $$(TF_LDFLAGS) $$(LDFLAGS) $(BL_LDFLAGS) -Map=$(MAPFILE) \
+	$$(q)$($(ARCH)-ld) -o $$@ $$(TF_LDFLAGS) $$(LDFLAGS) $$(WRAPPER_FLAGS) $(BL_LDFLAGS) -Map=$(MAPFILE) \
 		$(addprefix -T ,$(LINKER_SCRIPTS)) --script $(DEFAULT_LINKER_SCRIPT) \
 		$(OBJS) $(LDPATHS) $(LIBWRAPPER) $(LDLIBS) $(BL_LIBS)
 endif
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 368d26d..290a6fe 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -63,6 +63,9 @@
 # Include FP registers in cpu context
 CTX_INCLUDE_FPREGS		:= 0
 
+# Include SVE registers in cpu context
+CTX_INCLUDE_SVE_REGS		:= 0
+
 # Debug build
 DEBUG				:= 0
 
@@ -237,6 +240,10 @@
 # region, platform Makefile is free to override this value.
 SEPARATE_BL2_NOLOAD_REGION	:= 0
 
+# Put the SIMD context data structures in a separate memory region. Platforms
+# can choose to place them outside the default BSS region of the EL3 firmware.
+SEPARATE_SIMD_SECTION		:= 0
+
 # If the BL31 image initialisation code is reclaimed after use for the secondary
 # cores stack
 RECLAIM_INIT_CODE		:= 0
diff --git a/plat/arm/board/corstone1000/platform.mk b/plat/arm/board/corstone1000/platform.mk
index 9d44a14..dfde5aa 100644
--- a/plat/arm/board/corstone1000/platform.mk
+++ b/plat/arm/board/corstone1000/platform.mk
@@ -6,7 +6,7 @@
 
 # Making sure the corstone1000 platform type is specified
 ifeq ($(filter ${TARGET_PLATFORM}, fpga fvp),)
-	$(error TARGET_PLATFORM must be fpga or fvp)
+        $(error TARGET_PLATFORM must be fpga or fvp)
 endif
 
 CORSTONE1000_CPU_LIBS	+=lib/cpus/aarch64/cortex_a35.S
diff --git a/plat/arm/board/fvp/fdts/fvp_cactus_sp_manifest.dts b/plat/arm/board/fvp/fdts/fvp_cactus_sp_manifest.dts
new file mode 100644
index 0000000..de804e0
--- /dev/null
+++ b/plat/arm/board/fvp/fdts/fvp_cactus_sp_manifest.dts
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * This file is a Partition Manifest (PM) for a minimal Secure Partition (SP)
+ * that will be consumed by EL3 SPMC.
+ *
+ */
+
+/dts-v1/;
+
+/ {
+	compatible = "arm,ffa-manifest-1.0";
+	#address-cells = <2>;
+	#size-cells = <1>;
+
+	/* Properties */
+	ffa-version = <0x00010001>; /* 31:16 - Major, 15:0 - Minor */
+	id = <0x8001>;
+	uuid = <0x1e67b5b4 0xe14f904a 0x13fb1fb8 0xcbdae1da>;
+	messaging-method = <3>; /* Direct messaging only */
+	exception-level = <2>; /* S-EL1 */
+	execution-state = <0>; /* AARCH64 */
+	execution-ctx-count = <8>;
+	/* Boot protocol */
+	gp-register-num = <0>;
+};
diff --git a/plat/arm/board/fvp/include/plat.ld.S b/plat/arm/board/fvp/include/plat.ld.S
index 7c8bf06..2f99999 100644
--- a/plat/arm/board/fvp/include/plat.ld.S
+++ b/plat/arm/board/fvp/include/plat.ld.S
@@ -1,12 +1,38 @@
 /*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 #ifndef PLAT_LD_S
 #define PLAT_LD_S
 
-#include <plat/arm/common/arm_tzc_dram.ld.S>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+MEMORY {
+    EL3_SEC_DRAM (rw): ORIGIN = ARM_EL3_TZC_DRAM1_BASE, LENGTH = ARM_EL3_TZC_DRAM1_SIZE
+}
+
+SECTIONS
+{
+	. = ARM_EL3_TZC_DRAM1_BASE;
+	ASSERT(. == ALIGN(PAGE_SIZE),
+	"ARM_EL3_TZC_DRAM_BASE address is not aligned on a page boundary.")
+	.el3_tzc_dram (NOLOAD) : ALIGN(PAGE_SIZE) {
+	__PLAT_SPMC_SHMEM_DATASTORE_START__ = .;
+	*(.arm_spmc_shmem_datastore)
+	__PLAT_SPMC_SHMEM_DATASTORE_END__ = .;
+	__EL3_SEC_DRAM_START__ = .;
+	*(.arm_el3_tzc_dram)
+#if SEPARATE_SIMD_SECTION
+	. = ALIGN(16);
+	*(.simd_context)
+#endif
+	__EL3_SEC_DRAM_UNALIGNED_END__ = .;
+
+	. = ALIGN(PAGE_SIZE);
+	__EL3_SEC_DRAM_END__ = .;
+	} >EL3_SEC_DRAM
+}
 
 #if RECLAIM_INIT_CODE
 #include <plat/arm/common/arm_reclaim_init.ld.S>
diff --git a/plat/arm/board/fvp/jmptbl.i b/plat/arm/board/fvp/jmptbl.i
index dc8032f..077283e 100644
--- a/plat/arm/board/fvp/jmptbl.i
+++ b/plat/arm/board/fvp/jmptbl.i
@@ -36,7 +36,6 @@
 fdt     fdt_get_name
 fdt     fdt_get_alias
 fdt     fdt_node_offset_by_phandle
-fdt     fdt_subnode_offset
 fdt     fdt_add_subnode
 mbedtls mbedtls_asn1_get_alg
 mbedtls mbedtls_asn1_get_alg_null
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index cef7bdf..340eb38 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -47,6 +47,10 @@
 ifeq (${CTX_INCLUDE_FPREGS}, 0)
       ENABLE_SME_FOR_NS		:= 2
       ENABLE_SME2_FOR_NS	:= 2
+else
+      ENABLE_SVE_FOR_NS		:= 0
+      ENABLE_SME_FOR_NS		:= 0
+      ENABLE_SME2_FOR_NS	:= 0
 endif
 endif
 
diff --git a/plat/arm/board/neoverse_rd/common/ras/nrd_ras_cpu.c b/plat/arm/board/neoverse_rd/common/ras/nrd_ras_cpu.c
index cf1eb6f..dcee92c 100644
--- a/plat/arm/board/neoverse_rd/common/ras/nrd_ras_cpu.c
+++ b/plat/arm/board/neoverse_rd/common/ras/nrd_ras_cpu.c
@@ -73,29 +73,14 @@
 						  mair_el1);
 	cpu_info->ErrCtxEl1Reg[5]  = read_midr_el1();
 	cpu_info->ErrCtxEl1Reg[6]  = read_mpidr_el1();
-
-#if (ERRATA_SPECULATIVE_AT)
-	cpu_info->ErrCtxEl1Reg[7]  = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
-						  CTX_ERRATA_SPEC_AT_SCTLR_EL1);
-#else
-	cpu_info->ErrCtxEl1Reg[7]  = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
-						  sctlr_el1);
-#endif /* ERRATA_SPECULATIVE_AT */
-
+	cpu_info->ErrCtxEl1Reg[7] = read_ctx_sctlr_el1_reg_errata(ctx);
 	cpu_info->ErrCtxEl1Reg[8]  = read_ctx_reg(get_gpregs_ctx(ctx),
 						  CTX_GPREG_SP_EL0);
 	cpu_info->ErrCtxEl1Reg[9]  = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
 						  sp_el1);
 	cpu_info->ErrCtxEl1Reg[10] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
 						  spsr_el1);
-#if (ERRATA_SPECULATIVE_AT)
-	cpu_info->ErrCtxEl1Reg[11]  = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
-						   CTX_ERRATA_SPEC_AT_TCR_EL1);
-#else
-	cpu_info->ErrCtxEl1Reg[11] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
-						  tcr_el1);
-#endif /* ERRATA_SPECULATIVE_AT */
-
+	cpu_info->ErrCtxEl1Reg[11] = read_ctx_tcr_el1_reg_errata(ctx);
 	cpu_info->ErrCtxEl1Reg[12] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
 						  tpidr_el0);
 	cpu_info->ErrCtxEl1Reg[13] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
@@ -107,7 +92,7 @@
 	cpu_info->ErrCtxEl1Reg[16] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
 						  ttbr1_el1);
 
-#if CTX_INCLUDE_EL2_REGS
+#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
 	cpu_info->ErrCtxEl2Reg[0]   = read_el2_ctx_common(get_el2_sysregs_ctx(ctx),
 						elr_el2);
 	cpu_info->ErrCtxEl2Reg[1]   = read_el2_ctx_common(get_el2_sysregs_ctx(ctx),
@@ -140,7 +125,7 @@
 						vttbr_el2);
 	cpu_info->ErrCtxEl2Reg[15]  = read_el2_ctx_common(get_el2_sysregs_ctx(ctx),
 						esr_el2);
-#endif /* CTX_INCLUDE_EL2_REGS */
+#endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
 
 	cpu_info->ErrCtxEl3Reg[0]   = read_ctx_reg(get_el3state_ctx(ctx),
 						   CTX_ELR_EL3);
diff --git a/plat/arm/board/neoverse_rd/platform/rdv3/platform.mk b/plat/arm/board/neoverse_rd/platform/rdv3/platform.mk
index 7d770f5..da96730 100644
--- a/plat/arm/board/neoverse_rd/platform/rdv3/platform.mk
+++ b/plat/arm/board/neoverse_rd/platform/rdv3/platform.mk
@@ -21,12 +21,6 @@
 override ARM_ARCH_MAJOR			:= 8
 override ARM_ARCH_MINOR			:= 7
 
-# Image flags
-override NEED_BL1			:= yes
-override NEED_BL2			:= yes
-override NEED_BL32			:= no
-override NEED_RMM			:= no
-
 # Misc options
 override CTX_INCLUDE_AARCH32_REGS	:= 0
 
diff --git a/plat/arm/board/tc/platform.mk b/plat/arm/board/tc/platform.mk
index fb70500..1a7289a 100644
--- a/plat/arm/board/tc/platform.mk
+++ b/plat/arm/board/tc/platform.mk
@@ -34,6 +34,7 @@
 ENABLE_MPMM			:=	1
 ENABLE_MPMM_FCONF		:=	1
 ENABLE_FEAT_MTE2	        :=	2
+ENABLE_SPE_FOR_NS		:=	3
 
 CTX_INCLUDE_AARCH32_REGS	:=	0
 
@@ -109,6 +110,9 @@
 
 # CPU libraries for TARGET_PLATFORM=2
 ifeq (${TARGET_PLATFORM}, 2)
+ERRATA_A520_2938996	:=	1
+ERRATA_X4_2726228	:=	1
+
 TC_CPU_SOURCES	+=	lib/cpus/aarch64/cortex_a520.S \
 			lib/cpus/aarch64/cortex_a720.S \
 			lib/cpus/aarch64/cortex_x4.S
@@ -116,6 +120,8 @@
 
 # CPU libraries for TARGET_PLATFORM=3
 ifeq (${TARGET_PLATFORM}, 3)
+ERRATA_A520_2938996	:=	1
+
 TC_CPU_SOURCES	+=	lib/cpus/aarch64/cortex_a520.S \
 			lib/cpus/aarch64/cortex_a725.S \
 			lib/cpus/aarch64/cortex_x925.S
diff --git a/plat/intel/soc/agilex/bl2_plat_setup.c b/plat/intel/soc/agilex/bl2_plat_setup.c
index 61c0ef2..36820b2 100644
--- a/plat/intel/soc/agilex/bl2_plat_setup.c
+++ b/plat/intel/soc/agilex/bl2_plat_setup.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
- * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -30,6 +31,7 @@
 #include "socfpga_reset_manager.h"
 #include "socfpga_ros.h"
 #include "socfpga_system_manager.h"
+#include "socfpga_vab.h"
 #include "wdt/watchdog.h"
 
 static struct mmc_device_info mmc_info;
@@ -112,7 +114,10 @@
 
 	setup_page_tables(bl_regions, agilex_plat_mmap);
 
-	enable_mmu_el3(0);
+	/*
+	 * TODO: enable the MMU in a later phase
+	 */
+	// enable_mmu_el3(0);
 
 	dw_mmc_params_t params = EMMC_INIT_PARAMS(0x100000, get_mmc_clk());
 
@@ -173,6 +178,20 @@
 
 	assert(bl_mem_params);
 
+#if SOCFPGA_SECURE_VAB_AUTH
+	/*
+	 * VAB authentication starts here.
+	 * If authentication fails, do not proceed to process BL31; hang instead.
+	 */
+	int ret = 0;
+
+	ret = socfpga_vab_init(image_id);
+	if (ret < 0) {
+		ERROR("SOCFPGA VAB Authentication failed\n");
+		wfi();
+	}
+#endif
+
 	switch (image_id) {
 	case BL33_IMAGE_ID:
 		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
@@ -191,4 +210,3 @@
 void bl2_platform_setup(void)
 {
 }
-
diff --git a/plat/intel/soc/agilex/platform.mk b/plat/intel/soc/agilex/platform.mk
index 6780845..21cc6a3 100644
--- a/plat/intel/soc/agilex/platform.mk
+++ b/plat/intel/soc/agilex/platform.mk
@@ -1,6 +1,7 @@
 #
 # Copyright (c) 2019-2023, ARM Limited and Contributors. All rights reserved.
-# Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+# Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+# Copyright (c) 2024, Altera Corporation. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -27,6 +28,7 @@
 			plat/intel/soc/common/aarch64/platform_common.c \
 			plat/intel/soc/common/aarch64/plat_helpers.S	\
 			plat/intel/soc/common/drivers/ccu/ncore_ccu.c	\
+			plat/intel/soc/common/lib/sha/sha.c				\
 			plat/intel/soc/common/socfpga_delay_timer.c
 
 BL2_SOURCES     +=	\
@@ -49,6 +51,7 @@
 		plat/intel/soc/common/socfpga_image_load.c		\
 		plat/intel/soc/common/socfpga_ros.c			\
 		plat/intel/soc/common/socfpga_storage.c			\
+		plat/intel/soc/common/socfpga_vab.c				\
 		plat/intel/soc/common/soc/socfpga_emac.c		\
 		plat/intel/soc/common/soc/socfpga_firewall.c	\
 		plat/intel/soc/common/soc/socfpga_handoff.c		\
@@ -78,9 +81,20 @@
 		plat/intel/soc/common/soc/socfpga_mailbox.c		\
 		plat/intel/soc/common/soc/socfpga_reset_manager.c
 
+# Don't have the Linux kernel as a BL33 image by default
+ARM_LINUX_KERNEL_AS_BL33	:=	0
+$(eval $(call assert_boolean,ARM_LINUX_KERNEL_AS_BL33))
+$(eval $(call add_define,ARM_LINUX_KERNEL_AS_BL33))
 $(eval $(call add_define,ARM_PRELOADED_DTB_BASE))
 
+# Configs for VAB Authentication
+SOCFPGA_SECURE_VAB_AUTH  := 	0
+$(eval $(call assert_boolean,SOCFPGA_SECURE_VAB_AUTH))
+$(eval $(call add_define,SOCFPGA_SECURE_VAB_AUTH))
+
 PROGRAMMABLE_RESET_ADDRESS	:= 0
 RESET_TO_BL2			:= 1
 BL2_INV_DCACHE			:= 0
 USE_COHERENT_MEM		:= 1
+
+HANDLE_EA_EL3_FIRST_NS			:= 1
\ No newline at end of file
diff --git a/plat/intel/soc/agilex5/bl2_plat_setup.c b/plat/intel/soc/agilex5/bl2_plat_setup.c
index c74d799..265ee57 100644
--- a/plat/intel/soc/agilex5/bl2_plat_setup.c
+++ b/plat/intel/soc/agilex5/bl2_plat_setup.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2019-2021, ARM Limited and Contributors. All rights reserved.
  * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -35,6 +36,7 @@
 #include "socfpga_private.h"
 #include "socfpga_reset_manager.h"
 #include "socfpga_ros.h"
+#include "socfpga_vab.h"
 #include "wdt/watchdog.h"
 
 
@@ -165,6 +167,20 @@
 
 	assert(bl_mem_params);
 
+#if SOCFPGA_SECURE_VAB_AUTH
+	/*
+	 * VAB authentication starts here.
+	 * If authentication fails, do not proceed to process BL31; hang instead.
+	 */
+	int ret = 0;
+
+	ret = socfpga_vab_init(image_id);
+	if (ret < 0) {
+		ERROR("SOCFPGA VAB Authentication failed\n");
+		wfi();
+	}
+#endif
+
 	switch (image_id) {
 	case BL33_IMAGE_ID:
 		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
diff --git a/plat/intel/soc/agilex5/include/agilex5_system_manager.h b/plat/intel/soc/agilex5/include/agilex5_system_manager.h
index 46596bf..53dcd13 100644
--- a/plat/intel/soc/agilex5/include/agilex5_system_manager.h
+++ b/plat/intel/soc/agilex5/include/agilex5_system_manager.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -147,6 +148,7 @@
 
 /* QSPI ECC from SDM register */
 #define SOCFPGA_ECC_QSPI_CTRL						0x08
+#define SOCFPGA_ECC_QSPI_INITSTAT					0x0C
 #define SOCFPGA_ECC_QSPI_ERRINTEN					0x10
 #define SOCFPGA_ECC_QSPI_ERRINTENS					0x14
 #define SOCFPGA_ECC_QSPI_ERRINTENR					0x18
diff --git a/plat/intel/soc/agilex5/include/socfpga_plat_def.h b/plat/intel/soc/agilex5/include/socfpga_plat_def.h
index acdbe17..9bfc304 100644
--- a/plat/intel/soc/agilex5/include/socfpga_plat_def.h
+++ b/plat/intel/soc/agilex5/include/socfpga_plat_def.h
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
  * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,7 +22,7 @@
 #define PLAT_PRIMARY_CPU_A76					0x200
 #define PLAT_CLUSTER_ID_MPIDR_AFF_SHIFT				MPIDR_AFF2_SHIFT
 #define PLAT_CPU_ID_MPIDR_AFF_SHIFT				MPIDR_AFF1_SHIFT
-#define PLAT_L2_RESET_REQ			0xB007C0DE
+#define PLAT_L2_RESET_REQ					0xB007C0DE
 
 /* System Counter */
 /* TODO: Update back to 400MHz.
@@ -31,7 +32,7 @@
 #define PLAT_SYS_COUNTER_FREQ_IN_MHZ				(400)
 
 /* FPGA config helpers */
-#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR				0x400000
+#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR				0x80400000
 #define INTEL_SIP_SMC_FPGA_CONFIG_SIZE				0x2000000
 
 /* QSPI Setting */
@@ -101,7 +102,7 @@
 /*******************************************************************************
  * WDT related constants
  ******************************************************************************/
-#define WDT_BASE			(0x10D00200)
+#define WDT_BASE						(0x10D00200)
 
 /*******************************************************************************
  * GIC related constants
@@ -116,13 +117,13 @@
 /*******************************************************************************
  * SDMMC related pointer function
  ******************************************************************************/
-#define SDMMC_READ_BLOCKS	sdmmc_read_blocks
-#define SDMMC_WRITE_BLOCKS	sdmmc_write_blocks
+#define SDMMC_READ_BLOCKS					sdmmc_read_blocks
+#define SDMMC_WRITE_BLOCKS					sdmmc_write_blocks
 
 /*******************************************************************************
  * sysmgr.boot_scratch_cold6 & 7 (64bit) are used to indicate L2 reset
  * is done and HPS should trigger warm reset via RMR_EL3.
  ******************************************************************************/
-#define L2_RESET_DONE_REG			0x10D12218
+#define L2_RESET_DONE_REG					0x10D12218
 
 #endif /* PLAT_SOCFPGA_DEF_H */
diff --git a/plat/intel/soc/agilex5/platform.mk b/plat/intel/soc/agilex5/platform.mk
index 7302164..409c7b1 100644
--- a/plat/intel/soc/agilex5/platform.mk
+++ b/plat/intel/soc/agilex5/platform.mk
@@ -1,6 +1,7 @@
 #
 # Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
 # Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+# Copyright (c) 2024, Altera Corporation. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -8,6 +9,7 @@
 PLAT_INCLUDES		:=	\
 			-Iplat/intel/soc/agilex5/include/		\
 			-Iplat/intel/soc/common/drivers/		\
+			-Iplat/intel/soc/common/lib/sha/		\
 			-Iplat/intel/soc/common/include/
 
 # GIC-600 configuration
@@ -33,6 +35,7 @@
 			plat/intel/soc/common/drivers/sdmmc/sdmmc.c			\
 			plat/intel/soc/common/drivers/ddr/ddr.c			\
 			plat/intel/soc/common/drivers/nand/nand.c			\
+			plat/intel/soc/common/lib/sha/sha.c				\
 			plat/intel/soc/common/socfpga_delay_timer.c
 
 BL2_SOURCES		+=	\
@@ -107,6 +110,11 @@
 $(eval $(call add_define,ARM_LINUX_KERNEL_AS_BL33))
 $(eval $(call add_define,ARM_PRELOADED_DTB_BASE))
 
+# Configs for VAB Authentication
+SOCFPGA_SECURE_VAB_AUTH  := 	0
+$(eval $(call assert_boolean,SOCFPGA_SECURE_VAB_AUTH))
+$(eval $(call add_define,SOCFPGA_SECURE_VAB_AUTH))
+
 PROGRAMMABLE_RESET_ADDRESS	:= 0
 RESET_TO_BL2			:= 1
-BL2_INV_DCACHE			:= 0
+BL2_INV_DCACHE			:= 0
\ No newline at end of file
diff --git a/plat/intel/soc/common/include/socfpga_vab.h b/plat/intel/soc/common/include/socfpga_vab.h
index f6081df..4587d7f 100644
--- a/plat/intel/soc/common/include/socfpga_vab.h
+++ b/plat/intel/soc/common/include/socfpga_vab.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2020-2023, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,10 +8,28 @@
 #ifndef SOCFPGA_VAB_H
 #define SOCFPGA_VAB_H
 
-
 #include <stdlib.h>
 #include "socfpga_fcs.h"
 
+/* Macros */
+#define IS_BYTE_ALIGNED(x, a)		(((x) & ((typeof(x))(a) - 1)) == 0)
+#define BYTE_ALIGN(x, a)		__ALIGN_MASK((x), (typeof(x))(a)-1)
+#define __ALIGN_MASK(x, mask)		(((x)+(mask))&~(mask))
+#define VAB_CERT_HEADER_SIZE		sizeof(struct fcs_hps_vab_certificate_header)
+#define VAB_CERT_MAGIC_OFFSET		offsetof(struct fcs_hps_vab_certificate_header, d)
+#define VAB_CERT_FIT_SHA384_OFFSET	offsetof(struct fcs_hps_vab_certificate_data, fcs_sha384[0])
+#define SDM_CERT_MAGIC_NUM		0x25D04E7F
+#define CHUNKSZ_PER_WD_RESET		(256 * 1024)
+#define CCERT_CMD_TEST_PGM_MASK		0x80000000 //TODO: ATF FDT location
+
+/* SHA related return Macro */
+#define ENOVABCERT			1 /* VAB certificate not available */
+#define EIMGERR				2 /* Image format/size not valid */
+#define ETIMEOUT			3 /* Execution timeout */
+#define EPROCESS			4 /* Process error */
+#define EKEYREJECTED			5 /* Key was rejected by service */
+#define EINITREJECTED			6 /* VAB init was rejected */
+
 struct fcs_hps_vab_certificate_data {
 	uint32_t vab_cert_magic_num;			/* offset 0x10 */
 	uint32_t flags;
@@ -27,28 +46,9 @@
 	/* keychain starts at offset 0x50 */
 };
 
-/* Macros */
-#define IS_BYTE_ALIGNED(x, a)		(((x) & ((typeof(x))(a) - 1)) == 0)
-#define BYTE_ALIGN(x, a)		__ALIGN_MASK((x), (typeof(x))(a)-1)
-#define __ALIGN_MASK(x, mask)		(((x)+(mask))&~(mask))
-#define VAB_CERT_HEADER_SIZE		sizeof(struct fcs_hps_vab_certificate_header)
-#define VAB_CERT_MAGIC_OFFSET		offsetof(struct fcs_hps_vab_certificate_header, d)
-#define VAB_CERT_FIT_SHA384_OFFSET	offsetof(struct fcs_hps_vab_certificate_data, fcs_sha384[0])
-#define SDM_CERT_MAGIC_NUM		0x25D04E7F
-#define CHUNKSZ_PER_WD_RESET		(256 * 1024)
-
-/* SHA related return Macro */
-#define ENOVABIMG		1 /* VAB certificate not available */
-#define EIMGERR		2 /* Image format/size not valid */
-#define ETIMEOUT		3 /* Execution timeout */
-#define EPROCESS		4 /* Process error */
-#define EKEYREJECTED		5/* Key was rejected by service */
-
 /* Function Definitions */
-static size_t get_img_size(uint8_t *img_buf, size_t img_buf_sz);
-int socfpga_vendor_authentication(void **p_image, size_t *p_size);
-static uint32_t get_unaligned_le32(const void *p);
-void sha384_csum_wd(const unsigned char *input, unsigned int ilen,
-unsigned char *output, unsigned int chunk_sz);
-
+size_t get_img_size(uint8_t *img_buf, size_t img_buf_sz);
+uint32_t get_unaligned_le32(const void *p);
+int socfpga_vab_authentication(void **p_image, size_t *p_size);
+int socfpga_vab_init(unsigned int image_id);
 #endif
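
For reference, a minimal sketch (not part of the patch) of how the alignment helpers above behave; it only exercises the macros kept in socfpga_vab.h and assumes a GCC-style typeof:

/* Illustrative only -- mirrors the alignment macros in socfpga_vab.h. */
#include <assert.h>
#include <stddef.h>

#define IS_BYTE_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)
#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define BYTE_ALIGN(x, a)	__ALIGN_MASK((x), (typeof(x))(a) - 1)

int main(void)
{
	size_t sz = 10;

	assert(!IS_BYTE_ALIGNED(sz, 4));		/* 10 is not a multiple of 4 */
	assert(BYTE_ALIGN(sz, 4) == 12);		/* rounded up to the next 4-byte boundary */
	assert(BYTE_ALIGN((size_t)12, 4) == 12);	/* already-aligned values are unchanged */
	return 0;
}
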
diff --git a/plat/intel/soc/common/lib/sha/sha.c b/plat/intel/soc/common/lib/sha/sha.c
new file mode 100644
index 0000000..9a6adc6
--- /dev/null
+++ b/plat/intel/soc/common/lib/sha/sha.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/tbbr/tbbr_img_def.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+#include <plat/common/platform.h>
+#include <tools_share/firmware_image_package.h>
+
+#include "sha.h"
+#include "wdt/watchdog.h"
+
+/* SHA384 certificate ID */
+#define SHA384_H0	0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1	0x629a292a367cd507ULL
+#define SHA384_H2	0x9159015a3070dd17ULL
+#define SHA384_H3	0x152fecd8f70e5939ULL
+#define SHA384_H4	0x67332667ffc00b31ULL
+#define SHA384_H5	0x8eb44a8768581511ULL
+#define SHA384_H6	0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7	0x47b5481dbefa4fa4ULL
+
+/* SHA512 certificate ID */
+#define SHA512_H0	0x6a09e667f3bcc908ULL
+#define SHA512_H1	0xbb67ae8584caa73bULL
+#define SHA512_H2	0x3c6ef372fe94f82bULL
+#define SHA512_H3	0xa54ff53a5f1d36f1ULL
+#define SHA512_H4	0x510e527fade682d1ULL
+#define SHA512_H5	0x9b05688c2b3e6c1fULL
+#define SHA512_H6	0x1f83d9abfb41bd6bULL
+#define SHA512_H7	0x5be0cd19137e2179ULL
+
+void sha384_init(sha512_context *ctx)
+{
+	ctx->state[0] = SHA384_H0;
+	ctx->state[1] = SHA384_H1;
+	ctx->state[2] = SHA384_H2;
+	ctx->state[3] = SHA384_H3;
+	ctx->state[4] = SHA384_H4;
+	ctx->state[5] = SHA384_H5;
+	ctx->state[6] = SHA384_H6;
+	ctx->state[7] = SHA384_H7;
+	ctx->count[0] = ctx->count[1] = 0;
+}
+
+void sha384_update(sha512_context *ctx, const uint8_t *input, uint32_t length)
+{
+	sha512_base_do_update(ctx, input, length);
+}
+
+void sha384_finish(sha512_context *ctx, uint8_t digest[SHA384_SUM_LEN])
+{
+	int i;
+
+	sha512_base_do_finalize(ctx);
+	for (i = 0; i < SHA384_SUM_LEN / sizeof(uint64_t); i++)
+		PUT_UINT64_BE(ctx->state[i], digest, i * 8);
+}
+
+void sha384_start(const unsigned char *input, unsigned int len,
+					unsigned char *output, unsigned int chunk_sz)
+{
+	/* Service the watchdog after each chunk so hashing large images does not trip a reset. */
+	sha512_context ctx;
+	const unsigned char *end;
+	unsigned char *curr;
+	int chunk;
+
+	sha384_init(&ctx);
+
+	curr = (unsigned char *)input;
+	end = input + len;
+	while (curr < end) {
+		chunk = end - curr;
+		if (chunk > chunk_sz) {
+			chunk = chunk_sz;
+		}
+		sha384_update(&ctx, curr, chunk);
+		curr += chunk;
+		watchdog_sw_rst();
+	}
+
+	sha384_finish(&ctx, output);
+}
+
+/* SHA512 Start Here */
+void sha512_init(sha512_context *ctx)
+{
+	ctx->state[0] = SHA512_H0;
+	ctx->state[1] = SHA512_H1;
+	ctx->state[2] = SHA512_H2;
+	ctx->state[3] = SHA512_H3;
+	ctx->state[4] = SHA512_H4;
+	ctx->state[5] = SHA512_H5;
+	ctx->state[6] = SHA512_H6;
+	ctx->state[7] = SHA512_H7;
+	ctx->count[0] = ctx->count[1] = 0;
+}
+
+void sha512_update(sha512_context *ctx, const uint8_t *input, uint32_t length)
+{
+	sha512_base_do_update(ctx, input, length);
+}
+
+void sha512_finish(sha512_context *ctx, uint8_t digest[SHA512_SUM_LEN])
+{
+	int i;
+
+	sha512_base_do_finalize(ctx);
+	for (i = 0; i < SHA512_SUM_LEN / sizeof(uint64_t); i++)
+		PUT_UINT64_BE(ctx->state[i], digest, i * 8);
+}
+
+void sha512_start(const unsigned char *input, unsigned int len, unsigned char *output)
+{
+	/* TODO: Service the watchdog for each chunk of bytes, as sha384_start() does. */
+	sha512_context ctx;
+
+	sha512_init(&ctx);
+	sha512_update(&ctx, input, len);
+	sha512_finish(&ctx, output);
+}
+
+void sha512_transform(uint64_t *state, const uint8_t *input)
+{
+	uint64_t a, b, c, d, e, f, g, h, t1, t2;
+
+	int i;
+	uint64_t W[16];
+
+	/* load the state into our registers */
+	a = state[0];   b = state[1];   c = state[2];   d = state[3];
+	e = state[4];   f = state[5];   g = state[6];   h = state[7];
+
+	/* now iterate */
+	for (i = 0 ; i < 80; i += 8) {
+		if (!(i & 8)) {
+			int j;
+
+			if (i < 16) {
+				/* load the input */
+				for (j = 0; j < 16; j++)
+					LOAD_OP(i + j, W, input);
+			} else {
+				for (j = 0; j < 16; j++) {
+					BLEND_OP(i + j, W);
+				}
+			}
+		}
+
+		t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i & 15)];
+		t2 = e0(a) + Maj(a, b, c);    d += t1;    h = t1 + t2;
+		t1 = g + e1(d) + Ch(d, e, f) + sha512_K[i+1] + W[(i & 15) + 1];
+		t2 = e0(h) + Maj(h, a, b);    c += t1;    g = t1 + t2;
+		t1 = f + e1(c) + Ch(c, d, e) + sha512_K[i+2] + W[(i & 15) + 2];
+		t2 = e0(g) + Maj(g, h, a);    b += t1;    f = t1 + t2;
+		t1 = e + e1(b) + Ch(b, c, d) + sha512_K[i+3] + W[(i & 15) + 3];
+		t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
+		t1 = d + e1(a) + Ch(a, b, c) + sha512_K[i+4] + W[(i & 15) + 4];
+		t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
+		t1 = c + e1(h) + Ch(h, a, b) + sha512_K[i+5] + W[(i & 15) + 5];
+		t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
+		t1 = b + e1(g) + Ch(g, h, a) + sha512_K[i+6] + W[(i & 15) + 6];
+		t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
+		t1 = a + e1(f) + Ch(f, g, h) + sha512_K[i+7] + W[(i & 15) + 7];
+		t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;
+	}
+
+	state[0] += a; state[1] += b; state[2] += c; state[3] += d;
+	state[4] += e; state[5] += f; state[6] += g; state[7] += h;
+
+	/* erase our data */
+	a = b = c = d = e = f = g = h = t1 = t2 = 0;
+}
+
+void sha512_block_fn(sha512_context *sst, const uint8_t *src,
+				    int blocks)
+{
+	while (blocks--) {
+		sha512_transform(sst->state, src);
+		src += SHA512_BLOCK_SIZE;
+	}
+}
+
+
+void sha512_base_do_finalize(sha512_context *sctx)
+{
+	const int bit_offset = SHA512_BLOCK_SIZE - sizeof(uint64_t[2]);
+	uint64_t *bits = (uint64_t *)(sctx->buf + bit_offset);
+	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+	sctx->buf[partial++] = 0x80;
+	if (partial > bit_offset) {
+		memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
+		partial = 0;
+
+		sha512_block_fn(sctx, sctx->buf, 1);
+	}
+
+	memset(sctx->buf + partial, 0x0, bit_offset - partial);
+	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+	bits[1] = cpu_to_be64(sctx->count[0] << 3);
+
+	sha512_block_fn(sctx, sctx->buf, 1);
+}
+
+void sha512_base_do_update(sha512_context *sctx,
+					const uint8_t *data,
+					unsigned int len)
+{
+	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+	sctx->count[0] += len;
+	if (sctx->count[0] < len)
+		sctx->count[1]++;
+
+	if (((partial + len) >= SHA512_BLOCK_SIZE)) {
+		int blocks;
+
+		if (partial) {
+			int p = SHA512_BLOCK_SIZE - partial;
+
+			memcpy(sctx->buf + partial, data, p);
+			data += p;
+			len -= p;
+
+			sha512_block_fn(sctx, sctx->buf, 1);
+		}
+
+		blocks = len / SHA512_BLOCK_SIZE;
+		len %= SHA512_BLOCK_SIZE;
+
+		if (blocks) {
+			sha512_block_fn(sctx, data, blocks);
+			data += blocks * SHA512_BLOCK_SIZE;
+		}
+		partial = 0;
+	}
+	if (len)
+		memcpy(sctx->buf + partial, data, len);
+}
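
A minimal usage sketch for the chunked helper above (illustrative only: the caller name is invented, and watchdog_sw_rst() is supplied by the platform watchdog driver pulled in via wdt/watchdog.h):

/* Illustrative sketch: hash a loaded image in watchdog-friendly chunks. */
#include <stdint.h>

#include "sha.h"				/* sha384_start(), SHA384_SUM_LEN */

#define CHUNKSZ_PER_WD_RESET	(256 * 1024)	/* same chunk size the VAB flow uses */

/* hypothetical caller, not part of the patch */
void hash_image(const uint8_t *img, unsigned int img_sz, uint8_t digest[SHA384_SUM_LEN])
{
	/*
	 * sha384_start() walks the buffer CHUNKSZ_PER_WD_RESET bytes at a time
	 * and services the watchdog between chunks, so hashing a large image
	 * does not trip a watchdog reset.
	 */
	sha384_start(img, img_sz, digest, CHUNKSZ_PER_WD_RESET);
}
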
diff --git a/plat/intel/soc/common/lib/sha/sha.h b/plat/intel/soc/common/lib/sha/sha.h
new file mode 100644
index 0000000..41b5fa8
--- /dev/null
+++ b/plat/intel/soc/common/lib/sha/sha.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_SHA_H
+#define SOCFPGA_SHA_H
+
+#include <stdlib.h>
+
+
+#define SHA384_SUM_LEN			48
+#define SHA384_DER_LEN			19
+#define SHA512_SUM_LEN			64
+#define SHA512_DER_LEN			19
+#define SHA512_BLOCK_SIZE		128
+
+
+/* MACRO Function */
+#define GET_UINT64_BE(n, b, i) {	\
+	(n) = ((unsigned long long) (b)[(i)] << 56) |\
+	((unsigned long long) (b)[(i) + 1] << 48) |\
+	((unsigned long long) (b)[(i) + 2] << 40) |\
+	((unsigned long long) (b)[(i) + 3] << 32) |\
+	((unsigned long long) (b)[(i) + 4] << 24) |\
+	((unsigned long long) (b)[(i) + 5] << 16) |\
+	((unsigned long long) (b)[(i) + 6] <<  8) |\
+	((unsigned long long) (b)[(i) + 7]);\
+}
+
+#define PUT_UINT64_BE(n, b, i) {	\
+	(b)[(i)] = (unsigned char) ((n) >> 56);\
+	(b)[(i) + 1] = (unsigned char) ((n) >> 48);\
+	(b)[(i) + 2] = (unsigned char) ((n) >> 40);\
+	(b)[(i) + 3] = (unsigned char) ((n) >> 32);\
+	(b)[(i) + 4] = (unsigned char) ((n) >> 24);\
+	(b)[(i) + 5] = (unsigned char) ((n) >> 16);\
+	(b)[(i) + 6] = (unsigned char) ((n) >>  8);\
+	(b)[(i) + 7] = (unsigned char) ((n));\
+}
+
+#define e0(x)		(ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39))
+#define e1(x)		(ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41))
+#define s0(x)		(ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7))
+#define s1(x)		(ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6))
+
+/* Inline Function Definitions */
+/* ror64() to rotate its right in 64 bits. */
+static inline uint64_t ror64(uint64_t input, unsigned int shift)
+{
+	return (input >> (shift & 63)) | (input << ((-shift) & 63));
+}
+
+static inline uint64_t Ch(uint64_t x, uint64_t y, uint64_t z)
+{
+	return z ^ (x & (y ^ z));
+}
+
+static inline uint64_t Maj(uint64_t x, uint64_t y, uint64_t z)
+{
+	return (x & y) | (z & (x | y));
+}
+
+static inline void LOAD_OP(int I, uint64_t *W, const uint8_t *input)
+{
+	GET_UINT64_BE(W[I], input, I*8);
+}
+
+static inline void BLEND_OP(int I, uint64_t *W)
+{
+	W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]);
+}
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+static inline uint32_t le32_to_cpue(const uint32_t *p)
+{
+	return (uint32_t)*p;
+}
+#else
+static inline uint32_t le32_to_cpue(const uint32_t *p)
+{
+	return swab32(*p);
+}
+#endif
+
+static const uint64_t sha512_K[80] = {
+	0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
+	0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
+	0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL,
+	0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
+	0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL,
+	0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
+	0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, 0x2de92c6f592b0275ULL,
+	0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
+	0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL,
+	0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
+	0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL,
+	0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
+	0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL,
+	0x92722c851482353bULL, 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
+	0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL,
+	0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
+	0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL,
+	0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
+	0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL,
+	0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
+	0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL,
+	0xc67178f2e372532bULL, 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
+	0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL,
+	0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
+	0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL,
+	0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
+	0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
+};
+
+#define	__cpu_to_le64(x)	((__force __le64)(__u64)(x))
+
+#define _uswap_64(x, sfx)	\
+	((((x) & 0xff00000000000000##sfx) >> 56) |\
+	(((x) & 0x00ff000000000000##sfx) >> 40) |\
+	(((x) & 0x0000ff0000000000##sfx) >> 24) |\
+	(((x) & 0x000000ff00000000##sfx) >>  8) |\
+	(((x) & 0x00000000ff000000##sfx) <<  8) |\
+	(((x) & 0x0000000000ff0000##sfx) << 24) |\
+	(((x) & 0x000000000000ff00##sfx) << 40) |\
+	(((x) & 0x00000000000000ff##sfx) << 56))
+
+#if defined(__GNUC__)
+#define uswap_64(x)		_uswap_64(x, ull)
+#else
+#define uswap_64(x)		_uswap_64(x)
+#endif
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define cpu_to_be64(x)		uswap_64(x)
+#else
+#define cpu_to_be64(x)		(x)
+#endif
+
+typedef struct {
+	uint64_t state[SHA512_SUM_LEN / 8];
+	uint64_t count[2];
+	uint8_t buf[SHA512_BLOCK_SIZE];
+} sha512_context;
+
+/* Function Definitions */
+/* SHA384 Start Here */
+void sha384_init(sha512_context *ctx);
+void sha384_update(sha512_context *ctx, const uint8_t *input, uint32_t length);
+void sha384_finish(sha512_context *ctx, uint8_t digest[SHA384_SUM_LEN]);
+void sha384_start(const unsigned char *input, unsigned int len,
+			unsigned char *output, unsigned int chunk_sz);
+/* SHA512 Start Here */
+void sha512_init(sha512_context *ctx);
+void sha512_update(sha512_context *ctx, const uint8_t *input, uint32_t length);
+void sha512_finish(sha512_context *ctx, uint8_t digest[SHA512_SUM_LEN]);
+void sha512_start(const unsigned char *input, unsigned int len,
+			unsigned char *output);
+void sha512_transform(uint64_t *state, const uint8_t *input);
+void sha512_block_fn(sha512_context *sst, const uint8_t *src, int blocks);
+void sha512_base_do_finalize(sha512_context *sctx);
+void sha512_base_do_update(sha512_context *sctx, const uint8_t *data,
+				unsigned int len);
+
+#endif
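
Callers that stream data rather than hashing one contiguous buffer can drive the context API above directly; a hedged sketch (the fragment split is arbitrary, only the names from sha.h are real):

/* Illustrative sketch: incremental SHA-384 over two fragments. */
#include <stdint.h>

#include "sha.h"

void hash_fragments(const uint8_t *a, uint32_t a_len,
		    const uint8_t *b, uint32_t b_len,
		    uint8_t digest[SHA384_SUM_LEN])
{
	sha512_context ctx;			/* context type shared by SHA-384/512 */

	sha384_init(&ctx);			/* load the SHA-384 initial hash values */
	sha384_update(&ctx, a, a_len);		/* absorb data across any number of calls */
	sha384_update(&ctx, b, b_len);
	sha384_finish(&ctx, digest);		/* pad, append the bit length, emit 48 bytes */
}
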
diff --git a/plat/intel/soc/common/socfpga_sip_svc.c b/plat/intel/soc/common/socfpga_sip_svc.c
index 2f88c86..5dfbc14 100644
--- a/plat/intel/soc/common/socfpga_sip_svc.c
+++ b/plat/intel/soc/common/socfpga_sip_svc.c
@@ -419,6 +419,7 @@
 	case(SOCFPGA_MEMCTRL(DIAGINTTEST)):	/* DIAGINTTEST */
 	case(SOCFPGA_MEMCTRL(DERRADDRA)):	/* DERRADDRA */
 
+	case(SOCFPGA_ECC_QSPI(INITSTAT)):	/* ECC_QSPI_INITSTAT */
 	case(SOCFPGA_SYSMGR(EMAC_0)):	/* EMAC0 */
 	case(SOCFPGA_SYSMGR(EMAC_1)):	/* EMAC1 */
 	case(SOCFPGA_SYSMGR(EMAC_2)):	/* EMAC2 */
diff --git a/plat/intel/soc/common/socfpga_vab.c b/plat/intel/soc/common/socfpga_vab.c
index e16610c..d1734c8 100644
--- a/plat/intel/soc/common/socfpga_vab.c
+++ b/plat/intel/soc/common/socfpga_vab.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
  * Copyright (c) 2019-2023, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, Altera Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,18 +9,23 @@
 #include <assert.h>
 #include <errno.h>
 
+#include "../lib/sha/sha.h"
+
 #include <arch_helpers.h>
+#include <common/bl_common.h>
 #include <common/debug.h>
+#include <common/desc_image_load.h>
 #include <common/tbbr/tbbr_img_def.h>
 #include <drivers/delay_timer.h>
 #include <lib/mmio.h>
 #include <lib/utils.h>
+#include <plat/common/platform.h>
 #include <tools_share/firmware_image_package.h>
 
 #include "socfpga_mailbox.h"
 #include "socfpga_vab.h"
 
-static size_t get_img_size(uint8_t *img_buf, size_t img_buf_sz)
+size_t get_img_size(uint8_t *img_buf, size_t img_buf_sz)
 {
 	uint8_t *img_buf_end = img_buf + img_buf_sz;
 	uint32_t cert_sz = get_unaligned_le32(img_buf_end - sizeof(uint32_t));
@@ -35,9 +41,33 @@
 	return 0;
 }
 
+int socfpga_vab_init(unsigned int image_id)
+{
+	int ret = 0;
+	size_t image_size;
+	void *image_base_ptr;
+	/*
+	 * Get information about the images to load.
+	 */
+	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+
+	assert(bl_mem_params);
 
+	if (bl_mem_params == NULL) {
+		ERROR("SOCFPGA VAB Init failed\n");
+		return -EINITREJECTED;
+	}
+
+	if ((image_id == BL31_IMAGE_ID) || (image_id == BL33_IMAGE_ID)) {
+		image_base_ptr = (void *)bl_mem_params->image_info.image_base;
+		image_size = bl_mem_params->image_info.image_size;
+		ret = socfpga_vab_authentication(&image_base_ptr, &image_size);
+	}
+
+	return ret;
+}
 
-int socfpga_vendor_authentication(void **p_image, size_t *p_size)
+int socfpga_vab_authentication(void **p_image, size_t *p_size)
 {
 	int retry_count = 20;
 	uint8_t hash384[FCS_SHA384_WORD_SIZE];
@@ -46,51 +76,46 @@
 	uint8_t *cert_hash_ptr, *mbox_relocate_data_addr;
 	uint32_t resp = 0, resp_len = 1;
 	int ret = 0;
+	uint8_t u8_buf_static[MBOX_DATA_MAX_LEN];
+
+	mbox_relocate_data_addr = u8_buf_static;
 
 	img_addr = (uintptr_t)*p_image;
 	img_sz = get_img_size((uint8_t *)img_addr, *p_size);
 
 	if (!img_sz) {
-		NOTICE("VAB certificate not found in image!\n");
-		return -ENOVABIMG;
+		ERROR("VAB certificate not found in image!\n");
+		return -ENOVABCERT;
 	}
 
 	if (!IS_BYTE_ALIGNED(img_sz, sizeof(uint32_t))) {
-		NOTICE("Image size (%d bytes) not aliged to 4 bytes!\n", img_sz);
+		ERROR("Image size (%d bytes) not aligned to 4 bytes!\n", img_sz);
 		return -EIMGERR;
 	}
 
 	/* Generate HASH384 from the image */
-	/* TODO: This part need to cross check !!!!!! */
-	sha384_csum_wd((uint8_t *)img_addr, img_sz, hash384, CHUNKSZ_PER_WD_RESET);
-	cert_hash_ptr = (uint8_t *)(img_addr + img_sz +
-	VAB_CERT_MAGIC_OFFSET + VAB_CERT_FIT_SHA384_OFFSET);
+	sha384_start((uint8_t *)img_addr, img_sz, hash384, CHUNKSZ_PER_WD_RESET);
+	cert_hash_ptr = (uint8_t *)(img_addr + img_sz + VAB_CERT_MAGIC_OFFSET +
+								VAB_CERT_FIT_SHA384_OFFSET);
 
 	/*
 	 * Compare the SHA384 found in certificate against the SHA384
 	 * calculated from image
 	 */
 	if (memcmp(hash384, cert_hash_ptr, FCS_SHA384_WORD_SIZE)) {
-		NOTICE("SHA384 does not match!\n");
+		ERROR("SHA384 does not match!\n");
 		return -EKEYREJECTED;
 	}
 
-
 	mbox_data_addr = img_addr + img_sz - sizeof(uint32_t);
 	/* Size in word (32bits) */
 	mbox_data_sz = (BYTE_ALIGN(*p_size - img_sz, sizeof(uint32_t))) >> 2;
 
-	NOTICE("mbox_data_addr = %lx    mbox_data_sz = %d\n", mbox_data_addr, mbox_data_sz);
-
-	/* TODO: This part need to cross check !!!!!! */
-	// mbox_relocate_data_addr = (uint8_t *)malloc(mbox_data_sz * sizeof(uint32_t));
-	// if (!mbox_relocate_data_addr) {
-		// NOTICE("Cannot allocate memory for VAB certificate relocation!\n");
-		// return -ENOMEM;
-	// }
+	VERBOSE("mbox_data_addr = %lx    mbox_data_sz = %d\n", mbox_data_addr, mbox_data_sz);
 
 	memcpy(mbox_relocate_data_addr, (uint8_t *)mbox_data_addr, mbox_data_sz * sizeof(uint32_t));
-	*(uint32_t *)mbox_relocate_data_addr = 0;
+
+	*((unsigned int *)mbox_relocate_data_addr) = CCERT_CMD_TEST_PGM_MASK;
 
 	do {
 		/* Invoke SMC call to ATF to send the VAB certificate to SDM */
@@ -109,7 +134,6 @@
 	/* Free the relocate certificate memory space */
 	zeromem((void *)&mbox_relocate_data_addr, sizeof(uint32_t));
 
-
 	/* Exclude the size of the VAB certificate from image size */
 	*p_size = img_sz;
 
@@ -121,40 +145,32 @@
 		 /* 0x85 = Not allowed under current security setting */
 		if (ret == MBOX_RESP_ERR(0x85)) {
 			/* SDM bypass authentication */
-			NOTICE("Image Authentication bypassed at address\n");
+			ERROR("Image authentication bypassed by SDM\n");
 			return 0;
 		}
-		NOTICE("VAB certificate authentication failed in SDM\n");
+		ERROR("VAB certificate authentication failed in SDM\n");
 		/* 0x1FF = The device is busy */
 		if (ret == MBOX_RESP_ERR(0x1FF)) {
-			NOTICE("Operation timed out\n");
+			ERROR("Operation timed out\n");
 			return -ETIMEOUT;
 		} else if (ret == MBOX_WRONG_ID) {
-			NOTICE("No such process\n");
+			ERROR("No such process\n");
 			return -EPROCESS;
 		}
+		return -EAUTH;
 	} else {
 		/* If Certificate Process Status has error */
 		if (resp) {
-			NOTICE("VAB certificate execution format error\n");
+			ERROR("VAB certificate execution format error\n");
 			return -EIMGERR;
 		}
 	}
 
-	NOTICE("Image Authentication bypassed at address\n");
+	NOTICE("%s 0x%lx (%d bytes)\n", "Image Authentication passed at address", img_addr, img_sz);
 	return ret;
-
-}
-
-static uint32_t get_unaligned_le32(const void *p)
-{
-	/* TODO: Temp for testing */
-	//return le32_to_cpup((__le32 *)p);
-	return 0;
 }
 
-void sha384_csum_wd(const unsigned char *input, unsigned int ilen,
-		unsigned char *output, unsigned int chunk_sz)
+uint32_t get_unaligned_le32(const void *p)
 {
-	/* TODO: Update sha384 start, update and finish */
+	return le32_to_cpue((uint32_t *)p);
 }
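
The flow above relies on the certificate being appended to the image, with its size stored in the final little-endian word; a sketch of the layout and of where the reference digest sits (illustrative, the helper name is invented, offsets come from socfpga_vab.h):

/*
 * Illustrative layout, not part of the patch:
 *
 *   | image payload (img_sz bytes) | VAB certificate ...      | cert size (LE32) |
 *   ^ img_addr                     ^ img_addr + img_sz        ^ last word
 *
 * get_img_size() returns the payload length with the certificate excluded
 * (0 when no valid certificate is found), and the reference SHA-384 lives
 * inside the certificate at the offsets below.
 */
#include <stdint.h>

#include "socfpga_vab.h"

/* hypothetical helper mirroring the cert_hash_ptr computation above */
static inline uint8_t *cert_sha384_ptr(uintptr_t img_addr, uint32_t img_sz)
{
	return (uint8_t *)(img_addr + img_sz +
			   VAB_CERT_MAGIC_OFFSET + VAB_CERT_FIT_SHA384_OFFSET);
}
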
diff --git a/plat/nxp/s32/s32g274ardb2/include/platform_def.h b/plat/nxp/s32/s32g274ardb2/include/platform_def.h
index bdfeee2..1a4c495 100644
--- a/plat/nxp/s32/s32g274ardb2/include/platform_def.h
+++ b/plat/nxp/s32/s32g274ardb2/include/platform_def.h
@@ -54,8 +54,7 @@
 /* Console settings */
 #define UART_BASE			UL(0x401C8000)
 #define UART_BAUDRATE			U(115200)
-/* FIRC clock */
-#define UART_CLOCK_HZ			U(48000000)
+#define UART_CLOCK_HZ			U(125000000)
 
 #define S32G_FIP_BASE			UL(0x34100000)
 #define S32G_FIP_SIZE			UL(0x100000)
diff --git a/plat/nxp/s32/s32g274ardb2/plat_helpers.S b/plat/nxp/s32/s32g274ardb2/plat_helpers.S
index 193c884..10c0035 100644
--- a/plat/nxp/s32/s32g274ardb2/plat_helpers.S
+++ b/plat/nxp/s32/s32g274ardb2/plat_helpers.S
@@ -38,6 +38,8 @@
 
 /* void plat_crash_console_flush(void); */
 func plat_crash_console_flush
+	mov_imm	x0, UART_BASE
+	b	console_linflex_core_flush
 	ret
 endfunc plat_crash_console_flush
 
diff --git a/plat/qti/common/src/aarch64/qti_kryo4_gold.S b/plat/qti/common/src/aarch64/qti_kryo4_gold.S
index 9bcdf54..49b7cf0 100644
--- a/plat/qti/common/src/aarch64/qti_kryo4_gold.S
+++ b/plat/qti/common/src/aarch64/qti_kryo4_gold.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -41,16 +41,6 @@
 	ret
 endfunc qti_kryo4_gold_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Kryo4 Gold. Must follow AAPCS.
- */
-func qti_kryo4_gold_errata_report
-	/* TODO : Need to add support. Required only for debug bl31 image.*/
-	ret
-endfunc qti_kryo4_gold_errata_report
-#endif
-
 /* ---------------------------------------------
  * This function provides kryo4_gold specific
  * register information for crash reporting.
diff --git a/plat/qti/common/src/aarch64/qti_kryo4_silver.S b/plat/qti/common/src/aarch64/qti_kryo4_silver.S
index 36374b7..4a98912 100644
--- a/plat/qti/common/src/aarch64/qti_kryo4_silver.S
+++ b/plat/qti/common/src/aarch64/qti_kryo4_silver.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -35,17 +35,6 @@
 	ret
 endfunc qti_kryo4_silver_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Kryo4 Silver. Must follow AAPCS.
- */
-func qti_kryo4_silver_errata_report
-	/* TODO : Need to add support. Required only for debug bl31 image.*/
-	ret
-endfunc qti_kryo4_silver_errata_report
-#endif
-
-
 /* ---------------------------------------------
  * This function provides kryo4_silver specific
  * register information for crash reporting.
diff --git a/plat/qti/common/src/aarch64/qti_kryo6_gold.S b/plat/qti/common/src/aarch64/qti_kryo6_gold.S
index 577e7ff..5f9463f 100644
--- a/plat/qti/common/src/aarch64/qti_kryo6_gold.S
+++ b/plat/qti/common/src/aarch64/qti_kryo6_gold.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -41,16 +41,6 @@
 	ret
 endfunc qti_kryo6_gold_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Kryo4 Gold. Must follow AAPCS.
- */
-func qti_kryo6_gold_errata_report
-	/* TODO : Need to add support. Required only for debug bl31 image.*/
-	ret
-endfunc qti_kryo6_gold_errata_report
-#endif
-
 /* ---------------------------------------------
  * This function provides kryo4_gold specific
  * register information for crash reporting.
diff --git a/plat/qti/common/src/aarch64/qti_kryo6_silver.S b/plat/qti/common/src/aarch64/qti_kryo6_silver.S
index 6ad0bca..4a54a64 100644
--- a/plat/qti/common/src/aarch64/qti_kryo6_silver.S
+++ b/plat/qti/common/src/aarch64/qti_kryo6_silver.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -35,17 +35,6 @@
 	ret
 endfunc qti_kryo6_silver_cluster_pwr_dwn
 
-#if REPORT_ERRATA
-/*
- * Errata printing function for Kryo4 Silver. Must follow AAPCS.
- */
-func qti_kryo6_silver_errata_report
-	/* TODO : Need to add support. Required only for debug bl31 image.*/
-	ret
-endfunc qti_kryo6_silver_errata_report
-#endif
-
-
 /* ---------------------------------------------
  * This function provides kryo4_silver specific
  * register information for crash reporting.
diff --git a/plat/rockchip/common/include/plat_pm_helpers.h b/plat/rockchip/common/include/plat_pm_helpers.h
new file mode 100644
index 0000000..2204a65
--- /dev/null
+++ b/plat/rockchip/common/include/plat_pm_helpers.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLAT_PM_HELPERS_H
+#define PLAT_PM_HELPERS_H
+
+#include <stdint.h>
+
+/**
+ * Use this macro to define a register region.
+ * start: start offset from the base address.
+ * end: end offset from the base address.
+ * stride: stride of registers in region.
+ * base: base address of registers in region.
+ * wmsk: write mask of registers in region.
+ */
+#define REG_REGION(_start, _end, _stride, _base, _wmsk)	\
+{							\
+	.start = (_base) + (_start),			\
+	.end   = (_base) + (_end),			\
+	.stride   = _stride,				\
+	.wmsk  = _wmsk					\
+}
+
+struct reg_region {
+	/* Start address of region */
+	uint32_t start;
+	/* End address of region */
+	uint32_t end;
+	/* Stride of registers in region */
+	uint32_t stride;
+	/* Write mask of registers in region */
+	uint32_t wmsk;
+	/* Buffer to save/restore registers in region */
+	uint32_t *buf;
+};
+
+void rockchip_alloc_region_mem(struct reg_region *rgns, uint32_t rgn_num);
+void rockchip_reg_rgn_save(struct reg_region *rgns, uint32_t rgn_num);
+void rockchip_reg_rgn_restore(struct reg_region *rgns, uint32_t rgn_num);
+void rockchip_reg_rgn_restore_reverse(struct reg_region *rgns, uint32_t rgn_num);
+void rockchip_regs_dump(uint32_t base,
+			uint32_t start_offset,
+			uint32_t end_offset,
+			uint32_t stride);
+void rockchip_dump_reg_rgns(struct reg_region *rgns, uint32_t rgn_num);
+
+#endif /* PLAT_PM_HELPERS_H */
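
A hedged usage sketch for the helpers above (the register base, offsets and hook names are invented; only the REG_REGION macro and the rockchip_* functions come from plat_pm_helpers.h):

/* Illustrative only: save a block of registers across a power-down. */
#include <stdint.h>

#include <lib/utils_def.h>		/* ARRAY_SIZE */
#include <plat_pm_helpers.h>

#define EXAMPLE_GRF_BASE	0xfd000000U	/* hypothetical base address */

static struct reg_region example_rgns[] = {
	/* offsets 0x00..0x1c, stride 4, write-enable mask in the upper half-word */
	REG_REGION(0x00, 0x1c, 4, EXAMPLE_GRF_BASE, 0xffff0000),
};

void example_setup(void)		/* call once, e.g. from BL31 platform setup */
{
	/* carve per-region buffers out of the shared region_mem pool */
	rockchip_alloc_region_mem(example_rgns, ARRAY_SIZE(example_rgns));
}

void example_suspend(void)		/* hypothetical suspend hook */
{
	rockchip_reg_rgn_save(example_rgns, ARRAY_SIZE(example_rgns));
}

void example_resume(void)		/* hypothetical resume hook */
{
	rockchip_reg_rgn_restore(example_rgns, ARRAY_SIZE(example_rgns));
}
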
diff --git a/plat/rockchip/common/include/plat_private.h b/plat/rockchip/common/include/plat_private.h
index 44a0c46..1e13a9e 100644
--- a/plat/rockchip/common/include/plat_private.h
+++ b/plat/rockchip/common/include/plat_private.h
@@ -141,6 +141,7 @@
 uint32_t rockchip_get_uart_baudrate(void);
 uint32_t rockchip_get_uart_clock(void);
 
+void rockchip_init_scmi_server(void);
 #endif /* __ASSEMBLER__ */
 
 /******************************************************************************
diff --git a/plat/rockchip/common/include/rockchip_sip_svc.h b/plat/rockchip/common/include/rockchip_sip_svc.h
index 340d653..8836f9b 100644
--- a/plat/rockchip/common/include/rockchip_sip_svc.h
+++ b/plat/rockchip/common/include/rockchip_sip_svc.h
@@ -11,6 +11,7 @@
 #define SIP_SVC_CALL_COUNT		0x8200ff00
 #define SIP_SVC_UID			0x8200ff01
 #define SIP_SVC_VERSION			0x8200ff03
+#define RK_SIP_SCMI_AGENT0		0x82000010
 
 /* rockchip SiP Service Calls version numbers */
 #define RK_SIP_SVC_VERSION_MAJOR	0x0
diff --git a/plat/rockchip/common/plat_pm_helpers.c b/plat/rockchip/common/plat_pm_helpers.c
new file mode 100644
index 0000000..191b0ca
--- /dev/null
+++ b/plat/rockchip/common/plat_pm_helpers.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <common/debug.h>
+#include <drivers/console.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+#include <plat_pm_helpers.h>
+
+#define ROCKCHIP_PM_REG_REGION_MEM_LEN	(ROCKCHIP_PM_REG_REGION_MEM_SIZE / sizeof(uint32_t))
+
+/* REG region */
+#define RGN_LEN(_rgn)		(((_rgn)->end - (_rgn)->start) / (_rgn)->stride + 1)
+
+#ifndef ROCKCHIP_PM_REG_REGION_MEM_SIZE
+#define ROCKCHIP_PM_REG_REGION_MEM_SIZE		0
+#endif
+
+#ifdef ROCKCHIP_REG_RGN_MEM_BASE
+static uint32_t *region_mem = (uint32_t *)ROCKCHIP_REG_RGN_MEM_BASE;
+#else
+static uint32_t region_mem[ROCKCHIP_PM_REG_REGION_MEM_LEN];
+#endif
+
+static int region_mem_idx;
+
+static int alloc_region_mem(uint32_t *buf, int max_len,
+			    struct reg_region *rgns, uint32_t rgn_num)
+{
+	int i;
+	int total_len = 0, len = 0;
+	struct reg_region *r = rgns;
+
+	assert(buf && rgns && rgn_num);
+
+	for (i = 0; i < rgn_num; i++, r++) {
+		if (total_len < max_len)
+			r->buf = &buf[total_len];
+
+		len = RGN_LEN(r);
+		total_len += len;
+	}
+
+	if (total_len > max_len) {
+		ERROR("%s: remaining buffer length %d is too small for region 0x%x, need at least %d\n",
+		      __func__, max_len, rgns[0].start, total_len);
+		panic();
+	}
+
+	return total_len;
+}
+
+/**
+ * Allocate memory for reg_region->buf from region_mem.
+ * @rgns - struct reg_region array.
+ * @rgn_num - struct reg_region array length.
+ */
+void rockchip_alloc_region_mem(struct reg_region *rgns, uint32_t rgn_num)
+{
+	int max_len = 0, len;
+
+	assert(rgns && rgn_num);
+
+	max_len = ROCKCHIP_PM_REG_REGION_MEM_LEN - region_mem_idx;
+
+	len = alloc_region_mem(region_mem + region_mem_idx, max_len,
+			       rgns, rgn_num);
+
+	region_mem_idx += len;
+}
+
+/**
+ * Save (reg_region->start ~ reg_region->end) to reg_region->buf.
+ * @rgns - struct reg_region array.
+ * @rgn_num - struct reg_region array length.
+ */
+void rockchip_reg_rgn_save(struct reg_region *rgns, uint32_t rgn_num)
+{
+	struct reg_region *r;
+	uint32_t addr;
+	int i, j;
+
+	assert(rgns && rgn_num);
+
+	for (i = 0; i < rgn_num; i++) {
+		r = &rgns[i];
+		for (j = 0, addr = r->start; addr <= r->end; addr += r->stride, j++)
+			r->buf[j] = mmio_read_32(addr);
+	}
+}
+
+/**
+ * Restore reg_region->buf to (reg_region->start ~ reg_region->end).
+ * @rgns - struct reg_region array.
+ * @rgn_num - struct reg_region array length.
+ */
+void rockchip_reg_rgn_restore(struct reg_region *rgns, uint32_t rgn_num)
+{
+	struct reg_region *r;
+	uint32_t addr;
+	int i, j;
+
+	assert(rgns && rgn_num);
+
+	for (i = 0; i < rgn_num; i++) {
+		r = &rgns[i];
+		for (j = 0, addr = r->start; addr <= r->end; addr += r->stride, j++)
+			mmio_write_32(addr, r->buf[j] | r->wmsk);
+
+		dsb();
+	}
+}
+
+/**
+ * Restore reg_region->buf to (reg_region->start ~ reg_region->end) reversely.
+ * @rgns - struct reg_region array.
+ * @rgn_num - struct reg_region array length.
+ */
+void rockchip_reg_rgn_restore_reverse(struct reg_region *rgns, uint32_t rgn_num)
+{
+	struct reg_region *r;
+	uint32_t addr;
+	int i, j;
+
+	assert(rgns && rgn_num);
+
+	for (i = rgn_num - 1; i >= 0; i--) {
+		r = &rgns[i];
+		j = RGN_LEN(r) - 1;
+		for (addr = r->end; addr >= r->start; addr -= r->stride, j--)
+			mmio_write_32(addr, r->buf[j] | r->wmsk);
+
+		dsb();
+	}
+}
+
+static void rockchip_print_hex(uint32_t val)
+{
+	int i;
+	unsigned char tmp;
+
+	putchar('0');
+	putchar('x');
+	for (i = 0; i < 8; val <<= 4, ++i) {
+		tmp = (val & 0xf0000000) >> 28;
+		if (tmp < 10)
+			putchar('0' + tmp);
+		else
+			putchar('a' + tmp - 10);
+	}
+}
+
+/**
+ * Dump registers (base + start_offset ~ base + end_offset)
+ * @base - the base addr of the register.
+ * @start_offset - the start offset to dump.
+ * @end_offset - the end offset to dump.
+ * @stride - the stride of the registers.
+ */
+void rockchip_regs_dump(uint32_t base,
+			uint32_t start_offset,
+			uint32_t end_offset,
+			uint32_t stride)
+{
+	uint32_t i;
+
+	for (i = start_offset; i <= end_offset; i += stride) {
+		if ((i - start_offset) % 16 == 0) {
+			putchar('\n');
+			rockchip_print_hex(base + i);
+			putchar(':');
+			putchar(' ');
+			putchar(' ');
+			putchar(' ');
+			putchar(' ');
+		}
+		rockchip_print_hex(mmio_read_32(base + i));
+		putchar(' ');
+		putchar(' ');
+		putchar(' ');
+		putchar(' ');
+	}
+	putchar('\n');
+}
+
+/**
+ * Dump reg regions
+ * @rgns - struct reg_region array.
+ * @rgn_num - struct reg_region array length.
+ */
+void rockchip_dump_reg_rgns(struct reg_region *rgns, uint32_t rgn_num)
+{
+	struct reg_region *r;
+	int i;
+
+	assert(rgns && rgn_num);
+
+	for (i = 0; i < rgn_num; i++) {
+		r = &rgns[i];
+		rockchip_regs_dump(0x0, r->start, r->end, r->stride);
+	}
+}
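
One detail worth noting in the restore paths above: the value written back is r->buf[j] | r->wmsk. On Rockchip GRF/CRU-style registers the upper 16 bits typically act as a per-bit write-enable mask, so a wmsk of 0xffff0000 makes every lower-half bit writable in a single store, while plain read/write registers use a wmsk of 0. A tiny sketch of the idea:

/* Illustrative only: write-enable-masked restore value, as used above. */
#include <stdint.h>

#define WMSK_ALL	0xffff0000U	/* enable writes to all 16 payload bits */

static inline uint32_t wmsk_restore_value(uint16_t saved)
{
	/* upper half selects which payload bits the hardware latches */
	return WMSK_ALL | saved;
}
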
diff --git a/plat/rockchip/common/scmi/scmi.c b/plat/rockchip/common/scmi/scmi.c
new file mode 100644
index 0000000..5c43c51
--- /dev/null
+++ b/plat/rockchip/common/scmi/scmi.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <platform_def.h>
+
+#include <drivers/scmi-msg.h>
+#include <drivers/scmi.h>
+#include <lib/utils.h>
+#include <lib/utils_def.h>
+
+#define MAX_PROTOCOL_IN_LIST		8U
+
+static const char vendor[] = "rockchip";
+static const char sub_vendor[] = "";
+
+#pragma weak rockchip_scmi_protocol_table
+
+const uint8_t rockchip_scmi_protocol_table[1][MAX_PROTOCOL_IN_LIST] = {
+	{
+		SCMI_PROTOCOL_ID_CLOCK,
+		SCMI_PROTOCOL_ID_RESET_DOMAIN,
+		0
+	}
+};
+
+const char *plat_scmi_vendor_name(void)
+{
+	return vendor;
+}
+
+const char *plat_scmi_sub_vendor_name(void)
+{
+	return sub_vendor;
+}
+
+size_t plat_scmi_protocol_count(void)
+{
+	unsigned int count = 0U;
+	const uint8_t *protocol_list = rockchip_scmi_protocol_table[0];
+
+	while (protocol_list[count])
+		count++;
+
+	return count;
+}
+
+const uint8_t *plat_scmi_protocol_list(unsigned int agent_id)
+{
+	assert(agent_id < ARRAY_SIZE(rockchip_scmi_protocol_table));
+
+	return rockchip_scmi_protocol_table[agent_id];
+}
+
+static struct scmi_msg_channel scmi_channel[] = {
+	[0] = {
+		.shm_addr = SMT_BUFFER0_BASE,
+		.shm_size = SMT_BUF_SLOT_SIZE,
+	},
+
+#ifdef SMT_BUFFER1_BASE
+	[1] = {
+		.shm_addr = SMT_BUFFER1_BASE,
+		.shm_size = SMT_BUF_SLOT_SIZE,
+	},
+#endif
+};
+
+struct scmi_msg_channel *plat_scmi_get_channel(unsigned int agent_id)
+{
+	assert(agent_id < ARRAY_SIZE(scmi_channel));
+
+	return &scmi_channel[agent_id];
+}
+
+#pragma weak rockchip_init_scmi_server
+
+void rockchip_init_scmi_server(void)
+{
+	size_t i;
+
+	for (i = 0U; i < ARRAY_SIZE(scmi_channel); i++)
+		scmi_smt_init_agent_channel(&scmi_channel[i]);
+}
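
Both rockchip_scmi_protocol_table and rockchip_init_scmi_server() are weak, so a platform can override them; a hedged sketch of such an override and of a plausible call site (the protocol selection and the caller name are examples, not taken from the patch):

/* Illustrative platform override of the weak SCMI defaults above. */
#include <drivers/scmi-msg.h>
#include <drivers/scmi.h>

#include <plat_private.h>		/* rockchip_init_scmi_server() */

#define MAX_PROTOCOL_IN_LIST	8U

/* single agent exposing only the clock protocol (example choice) */
const uint8_t rockchip_scmi_protocol_table[1][MAX_PROTOCOL_IN_LIST] = {
	{
		SCMI_PROTOCOL_ID_CLOCK,
		0
	}
};

void example_bl31_platform_setup(void)	/* hypothetical call site */
{
	rockchip_init_scmi_server();	/* initialise the SMT shared-memory channels */
}
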
diff --git a/plat/rockchip/common/scmi/scmi_clock.c b/plat/rockchip/common/scmi/scmi_clock.c
new file mode 100644
index 0000000..d6d4b37
--- /dev/null
+++ b/plat/rockchip/common/scmi/scmi_clock.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/scmi-msg.h>
+#include <drivers/scmi.h>
+
+#include "scmi_clock.h"
+
+#pragma weak rockchip_scmi_clock_count
+#pragma weak rockchip_scmi_get_clock
+
+size_t rockchip_scmi_clock_count(unsigned int agent_id __unused)
+{
+	return 0;
+}
+
+rk_scmi_clock_t *rockchip_scmi_get_clock(uint32_t agent_id __unused,
+					 uint32_t scmi_id __unused)
+{
+	return NULL;
+}
+
+size_t plat_scmi_clock_count(unsigned int agent_id)
+{
+	return rockchip_scmi_clock_count(agent_id);
+}
+
+const char *plat_scmi_clock_get_name(unsigned int agent_id,
+				     unsigned int scmi_id)
+{
+	rk_scmi_clock_t *clock;
+
+	clock = rockchip_scmi_get_clock(agent_id, scmi_id);
+	if (clock == NULL)
+		return NULL;
+
+	return clock->name;
+}
+
+int32_t plat_scmi_clock_rates_array(unsigned int agent_id,
+				    unsigned int scmi_id,
+				    unsigned long *rates,
+				    size_t *nb_elts,
+				    uint32_t start_idx)
+{
+	uint32_t i;
+	unsigned long *rate_table;
+	rk_scmi_clock_t *clock;
+
+	clock = rockchip_scmi_get_clock(agent_id, scmi_id);
+	if (clock == NULL)
+		return SCMI_NOT_FOUND;
+
+	rate_table = clock->rate_table;
+	if (rate_table == NULL)
+		return SCMI_NOT_SUPPORTED;
+
+	if (rates == 0) {
+		*nb_elts = clock->rate_cnt;
+		goto out;
+	}
+
+	if (start_idx + *nb_elts > clock->rate_cnt)
+		return SCMI_OUT_OF_RANGE;
+
+	for (i = 0; i < *nb_elts; i++)
+		rates[i] = rate_table[start_idx + i];
+
+out:
+	return SCMI_SUCCESS;
+}
+
+int32_t plat_scmi_clock_rates_by_step(unsigned int agent_id __unused,
+				      unsigned int scmi_id __unused,
+				      unsigned long *steps __unused)
+{
+	return SCMI_NOT_SUPPORTED;
+}
+
+unsigned long plat_scmi_clock_get_rate(unsigned int agent_id,
+				       unsigned int scmi_id)
+{
+	rk_scmi_clock_t *clock;
+	unsigned long rate = 0;
+
+	clock = rockchip_scmi_get_clock(agent_id, scmi_id);
+	if (clock == NULL)
+		return 0;
+
+	if (clock->clk_ops && clock->clk_ops->get_rate)
+		rate = clock->clk_ops->get_rate(clock);
+
+	/* fall back to cur_rate if there is no get_rate op or it returns 0 */
+	if (rate == 0)
+		rate = clock->cur_rate;
+
+	return rate;
+}
+
+int32_t plat_scmi_clock_set_rate(unsigned int agent_id,
+				 unsigned int scmi_id,
+				 unsigned long rate)
+{
+	rk_scmi_clock_t *clock;
+	int32_t status = 0;
+
+	clock = rockchip_scmi_get_clock(agent_id, scmi_id);
+	if (clock == NULL)
+		return SCMI_NOT_FOUND;
+
+	if (clock->clk_ops && clock->clk_ops->set_rate) {
+		status = clock->clk_ops->set_rate(clock, rate);
+		if (status == SCMI_SUCCESS)
+			clock->cur_rate = rate;
+	} else {
+		status = SCMI_NOT_SUPPORTED;
+	}
+
+	return status;
+}
+
+int32_t plat_scmi_clock_get_state(unsigned int agent_id,
+				  unsigned int scmi_id)
+{
+	rk_scmi_clock_t *clock;
+
+	clock = rockchip_scmi_get_clock(agent_id, scmi_id);
+	if (clock == NULL)
+		return 0;
+
+	return clock->enable;
+}
+
+int32_t plat_scmi_clock_set_state(unsigned int agent_id,
+				  unsigned int scmi_id,
+				  bool enable_not_disable)
+{
+	rk_scmi_clock_t *clock;
+	int32_t status = 0;
+
+	clock = rockchip_scmi_get_clock(agent_id, scmi_id);
+	if (clock == NULL)
+		return SCMI_NOT_FOUND;
+
+	if (clock->clk_ops && clock->clk_ops->set_status) {
+		status = clock->clk_ops->set_status(clock, enable_not_disable);
+		if (status == SCMI_SUCCESS)
+			clock->enable = enable_not_disable;
+	} else {
+		status = SCMI_NOT_SUPPORTED;
+	}
+
+	return status;
+}
diff --git a/plat/rockchip/common/scmi/scmi_clock.h b/plat/rockchip/common/scmi/scmi_clock.h
new file mode 100644
index 0000000..e640fe1
--- /dev/null
+++ b/plat/rockchip/common/scmi/scmi_clock.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RK_SCMI_CLOCK_H
+#define RK_SCMI_CLOCK_H
+
+#include <stdint.h>
+
+#include <common.h>
+
+struct rk_scmi_clock;
+
+struct rk_clk_ops {
+	unsigned long (*get_rate)(struct rk_scmi_clock *clock);
+	int (*set_rate)(struct rk_scmi_clock *clock, unsigned long rate);
+	int (*set_status)(struct rk_scmi_clock *clock, bool status);
+};
+
+typedef struct rk_scmi_clock {
+	char name[SCMI_CLOCK_NAME_LENGTH_MAX];
+	uint8_t enable;
+	int8_t is_security;
+	uint32_t id;
+	uint32_t rate_cnt;
+	uint64_t cur_rate;
+	uint32_t enable_count;
+	const struct rk_clk_ops *clk_ops;
+	unsigned long *rate_table;
+} rk_scmi_clock_t;
+
+/*
+ * Return number of clock controllers for an agent
+ * @agent_id: SCMI agent ID
+ * Return number of clock controllers
+ */
+size_t rockchip_scmi_clock_count(unsigned int agent_id);
+
+/*
+ * Get a pointer to an rk_scmi_clock_t
+ * @agent_id: SCMI agent ID
+ * @scmi_id: SCMI clock ID
+ * Return a pointer to the matching rk_scmi_clock_t
+ */
+rk_scmi_clock_t *rockchip_scmi_get_clock(uint32_t agent_id,
+					 uint32_t scmi_id);
+
+#endif /* RK_SCMI_CLOCK_H */
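
A hedged sketch of the platform side the weak hooks above expect: a clock table wired to rk_clk_ops callbacks (all names, rates and the single-agent assumption are illustrative):

/* Illustrative only: minimal platform clock table for the SCMI clock hooks. */
#include <stdbool.h>
#include <stddef.h>

#include <lib/utils_def.h>		/* ARRAY_SIZE */

#include "scmi_clock.h"

static unsigned long example_rates[] = { 200000000UL, 400000000UL, 800000000UL };

static unsigned long example_get_rate(rk_scmi_clock_t *c)
{
	return c->cur_rate;		/* a real driver would read the PLL/divider */
}

static int example_set_rate(rk_scmi_clock_t *c, unsigned long rate)
{
	/* program the clock tree here */
	(void)c;
	(void)rate;
	return 0;
}

static int example_set_status(rk_scmi_clock_t *c, bool enable)
{
	/* gate or ungate the clock here */
	(void)c;
	(void)enable;
	return 0;
}

static const struct rk_clk_ops example_ops = {
	.get_rate = example_get_rate,
	.set_rate = example_set_rate,
	.set_status = example_set_status,
};

static rk_scmi_clock_t example_clocks[] = {
	{
		.name = "clk_example",
		.id = 0,
		.clk_ops = &example_ops,
		.rate_table = example_rates,
		.rate_cnt = ARRAY_SIZE(example_rates),
	},
};

size_t rockchip_scmi_clock_count(unsigned int agent_id)
{
	(void)agent_id;			/* single-agent example */
	return ARRAY_SIZE(example_clocks);
}

rk_scmi_clock_t *rockchip_scmi_get_clock(uint32_t agent_id, uint32_t scmi_id)
{
	(void)agent_id;
	return (scmi_id < ARRAY_SIZE(example_clocks)) ? &example_clocks[scmi_id] : NULL;
}
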
diff --git a/plat/rockchip/common/scmi/scmi_rstd.c b/plat/rockchip/common/scmi/scmi_rstd.c
new file mode 100644
index 0000000..35c5e0b
--- /dev/null
+++ b/plat/rockchip/common/scmi/scmi_rstd.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/scmi-msg.h>
+#include <drivers/scmi.h>
+
+#include "scmi_rstd.h"
+
+#pragma weak rockchip_scmi_rstd_count
+#pragma weak rockchip_scmi_get_rstd
+
+size_t rockchip_scmi_rstd_count(unsigned int agent_id __unused)
+{
+	return 0U;
+}
+
+rk_scmi_rstd_t *rockchip_scmi_get_rstd(unsigned int agent_id __unused,
+				       unsigned int scmi_id __unused)
+{
+	return NULL;
+}
+
+size_t plat_scmi_rstd_count(unsigned int agent_id)
+{
+	return rockchip_scmi_rstd_count(agent_id);
+}
+
+const char *plat_scmi_rstd_get_name(unsigned int agent_id,
+				    unsigned int scmi_id)
+{
+	rk_scmi_rstd_t *rstd;
+
+	rstd = rockchip_scmi_get_rstd(agent_id, scmi_id);
+	if (rstd == NULL)
+		return NULL;
+
+	return rstd->name;
+}
+
+int32_t plat_scmi_rstd_autonomous(unsigned int agent_id,
+				  unsigned int scmi_id,
+				  unsigned int state)
+{
+	rk_scmi_rstd_t *rstd;
+
+	rstd = rockchip_scmi_get_rstd(agent_id, scmi_id);
+	if (rstd == NULL)
+		return SCMI_NOT_FOUND;
+
+	if ((rstd->rstd_ops != NULL) && (rstd->rstd_ops->reset_auto != NULL))
+		return rstd->rstd_ops->reset_auto(rstd, state);
+	else
+		return SCMI_NOT_SUPPORTED;
+}
+
+int32_t plat_scmi_rstd_set_state(unsigned int agent_id,
+				 unsigned int scmi_id,
+				 bool assert_not_deassert)
+{
+	rk_scmi_rstd_t *rstd;
+
+	rstd = rockchip_scmi_get_rstd(agent_id, scmi_id);
+	if (rstd == NULL)
+		return SCMI_NOT_FOUND;
+
+	if ((rstd->rstd_ops != NULL) && (rstd->rstd_ops->reset_explicit != NULL))
+		return rstd->rstd_ops->reset_explicit(rstd,
+						      assert_not_deassert);
+	else
+		return SCMI_NOT_SUPPORTED;
+}
diff --git a/plat/rockchip/common/scmi/scmi_rstd.h b/plat/rockchip/common/scmi/scmi_rstd.h
new file mode 100644
index 0000000..1af5881
--- /dev/null
+++ b/plat/rockchip/common/scmi/scmi_rstd.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RK_SCMI_RESET_DOMAIN_H
+#define RK_SCMI_RESET_DOMAIN_H
+
+#include <stdint.h>
+
+#include <common.h>
+
+struct rk_scmi_rstd;
+
+struct rk_scmi_rstd_ops {
+	int (*reset_auto)(struct rk_scmi_rstd *rstd, uint32_t state);
+	int (*reset_explicit)(struct rk_scmi_rstd *rstd, bool assert_not_deassert);
+};
+
+typedef struct rk_scmi_rstd {
+	char name[SCMI_RESET_DOMAIN_ATTR_NAME_SZ];
+	uint32_t id;
+	uint32_t attribute;
+	uint32_t latency;
+	struct rk_scmi_rstd_ops *rstd_ops;
+} rk_scmi_rstd_t;
+
+/*
+ * Return the number of reset domains for an agent
+ * @agent_id: SCMI agent ID
+ * Return the number of reset domains
+ */
+size_t rockchip_scmi_rstd_count(unsigned int agent_id);
+
+/*
+ * Get a pointer to an rk_scmi_rstd_t
+ * @agent_id: SCMI agent ID
+ * @scmi_id: SCMI rstd ID
+ * Return a pointer to the matching rk_scmi_rstd_t
+ */
+rk_scmi_rstd_t *rockchip_scmi_get_rstd(unsigned int agent_id,
+				       unsigned int scmi_id);
+
+#endif /* RK_SCMI_RESET_DOMAIN_H */
diff --git a/plat/rockchip/rk3588/drivers/pmu/plat_pmu_macros.S b/plat/rockchip/rk3588/drivers/pmu/plat_pmu_macros.S
new file mode 100644
index 0000000..c278899
--- /dev/null
+++ b/plat/rockchip/rk3588/drivers/pmu/plat_pmu_macros.S
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+.globl	clst_warmboot_data
+
+.macro	func_rockchip_clst_warmboot
+.endm
+
+.macro rockchip_clst_warmboot_data
+clst_warmboot_data:
+	.rept	PLATFORM_CLUSTER_COUNT
+	.word	0
+	.endr
+.endm
diff --git a/plat/rockchip/rk3588/drivers/pmu/pm_pd_regs.c b/plat/rockchip/rk3588/drivers/pmu/pm_pd_regs.c
new file mode 100644
index 0000000..78e8500
--- /dev/null
+++ b/plat/rockchip/rk3588/drivers/pmu/pm_pd_regs.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <common/debug.h>
+#include <drivers/console.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <pmu.h>
+
+#include <plat_pm_helpers.h>
+#include <plat_private.h>
+#include <pm_pd_regs.h>
+#include <soc.h>
+
+#define WMSK_VAL		0xffff0000
+
+static struct reg_region qos_reg_rgns[] = {
+	[QOS_ISP0_MWO] = REG_REGION(0x08, 0x18, 4, 0xfdf40500, 0),
+	[QOS_ISP0_MRO] = REG_REGION(0x08, 0x18, 4, 0xfdf40400, 0),
+	[QOS_ISP1_MWO] = REG_REGION(0x08, 0x18, 4, 0xfdf41000, 0),
+	[QOS_ISP1_MRO] = REG_REGION(0x08, 0x18, 4, 0xfdf41100, 0),
+	[QOS_VICAP_M0] = REG_REGION(0x08, 0x18, 4, 0xfdf40600, 0),
+	[QOS_VICAP_M1] = REG_REGION(0x08, 0x18, 4, 0xfdf40800, 0),
+	[QOS_FISHEYE0] = REG_REGION(0x08, 0x18, 4, 0xfdf40000, 0),
+	[QOS_FISHEYE1] = REG_REGION(0x08, 0x18, 4, 0xfdf40200, 0),
+	[QOS_VOP_M0] = REG_REGION(0x08, 0x18, 4, 0xfdf82000, 0),
+	[QOS_VOP_M1] = REG_REGION(0x08, 0x18, 4, 0xfdf82200, 0),
+	[QOS_RKVDEC0] = REG_REGION(0x08, 0x18, 4, 0xfdf62000, 0),
+	[QOS_RKVDEC1] = REG_REGION(0x08, 0x18, 4, 0xfdf63000, 0),
+	[QOS_AV1] = REG_REGION(0x08, 0x18, 4, 0xfdf64000, 0),
+	[QOS_RKVENC0_M0RO] = REG_REGION(0x08, 0x18, 4, 0xfdf60000, 0),
+	[QOS_RKVENC0_M1RO] = REG_REGION(0x08, 0x18, 4, 0xfdf60200, 0),
+	[QOS_RKVENC0_M2WO] = REG_REGION(0x08, 0x18, 4, 0xfdf60400, 0),
+	[QOS_RKVENC1_M0RO] = REG_REGION(0x08, 0x18, 4, 0xfdf61000, 0),
+	[QOS_RKVENC1_M1RO] = REG_REGION(0x08, 0x18, 4, 0xfdf61200, 0),
+	[QOS_RKVENC1_M2WO] = REG_REGION(0x08, 0x18, 4, 0xfdf61400, 0),
+	[QOS_DSU_M0] = REG_REGION(0x08, 0x18, 4, 0xfe008000, 0),
+	[QOS_DSU_M1] = REG_REGION(0x08, 0x18, 4, 0xfe008800, 0),
+	[QOS_DSU_MP] = REG_REGION(0x08, 0x18, 4, 0xfdf34200, 0),
+	[QOS_DEBUG] = REG_REGION(0x08, 0x18, 4, 0xfdf34400, 0),
+	[QOS_GPU_M0] = REG_REGION(0x08, 0x18, 4, 0xfdf35000, 0),
+	[QOS_GPU_M1] = REG_REGION(0x08, 0x18, 4, 0xfdf35200, 0),
+	[QOS_GPU_M2] = REG_REGION(0x08, 0x18, 4, 0xfdf35400, 0),
+	[QOS_GPU_M3] = REG_REGION(0x08, 0x18, 4, 0xfdf35600, 0),
+	[QOS_NPU1] = REG_REGION(0x08, 0x18, 4, 0xfdf70000, 0),
+	[QOS_NPU0_MRO] = REG_REGION(0x08, 0x18, 4, 0xfdf72200, 0),
+	[QOS_NPU2] = REG_REGION(0x08, 0x18, 4, 0xfdf71000, 0),
+	[QOS_NPU0_MWR] = REG_REGION(0x08, 0x18, 4, 0xfdf72000, 0),
+	[QOS_MCU_NPU] = REG_REGION(0x08, 0x18, 4, 0xfdf72400, 0),
+	[QOS_JPEG_DEC] = REG_REGION(0x08, 0x18, 4, 0xfdf66200, 0),
+	[QOS_JPEG_ENC0] = REG_REGION(0x08, 0x18, 4, 0xfdf66400, 0),
+	[QOS_JPEG_ENC1] = REG_REGION(0x08, 0x18, 4, 0xfdf66600, 0),
+	[QOS_JPEG_ENC2] = REG_REGION(0x08, 0x18, 4, 0xfdf66800, 0),
+	[QOS_JPEG_ENC3] = REG_REGION(0x08, 0x18, 4, 0xfdf66a00, 0),
+	[QOS_RGA2_MRO] = REG_REGION(0x08, 0x18, 4, 0xfdf66c00, 0),
+	[QOS_RGA2_MWO] = REG_REGION(0x08, 0x18, 4, 0xfdf66e00, 0),
+	[QOS_RGA3_0] = REG_REGION(0x08, 0x18, 4, 0xfdf67000, 0),
+	[QOS_RGA3_1] = REG_REGION(0x08, 0x18, 4, 0xfdf36000, 0),
+	[QOS_VDPU] = REG_REGION(0x08, 0x18, 4, 0xfdf67200, 0),
+	[QOS_IEP] = REG_REGION(0x08, 0x18, 4, 0xfdf66000, 0),
+	[QOS_HDCP0] = REG_REGION(0x08, 0x18, 4, 0xfdf80000, 0),
+	[QOS_HDCP1] = REG_REGION(0x08, 0x18, 4, 0xfdf81000, 0),
+	[QOS_HDMIRX] = REG_REGION(0x08, 0x18, 4, 0xfdf81200, 0),
+	[QOS_GIC600_M0] = REG_REGION(0x08, 0x18, 4, 0xfdf3a000, 0),
+	[QOS_GIC600_M1] = REG_REGION(0x08, 0x18, 4, 0xfdf3a200, 0),
+	[QOS_MMU600PCIE_TCU] = REG_REGION(0x08, 0x18, 4, 0xfdf3a400, 0),
+	[QOS_MMU600PHP_TBU] = REG_REGION(0x08, 0x18, 4, 0xfdf3a600, 0),
+	[QOS_MMU600PHP_TCU] = REG_REGION(0x08, 0x18, 4, 0xfdf3a800, 0),
+	[QOS_USB3_0] = REG_REGION(0x08, 0x18, 4, 0xfdf3e200, 0),
+	[QOS_USB3_1] = REG_REGION(0x08, 0x18, 4, 0xfdf3e000, 0),
+	[QOS_USBHOST_0] = REG_REGION(0x08, 0x18, 4, 0xfdf3e400, 0),
+	[QOS_USBHOST_1] = REG_REGION(0x08, 0x18, 4, 0xfdf3e600, 0),
+	[QOS_EMMC] = REG_REGION(0x08, 0x18, 4, 0xfdf38200, 0),
+	[QOS_FSPI] = REG_REGION(0x08, 0x18, 4, 0xfdf38000, 0),
+	[QOS_SDIO] = REG_REGION(0x08, 0x18, 4, 0xfdf39000, 0),
+	[QOS_DECOM] = REG_REGION(0x08, 0x18, 4, 0xfdf32000, 0),
+	[QOS_DMAC0] = REG_REGION(0x08, 0x18, 4, 0xfdf32200, 0),
+	[QOS_DMAC1] = REG_REGION(0x08, 0x18, 4, 0xfdf32400, 0),
+	[QOS_DMAC2] = REG_REGION(0x08, 0x18, 4, 0xfdf32600, 0),
+	[QOS_GIC600M] = REG_REGION(0x08, 0x18, 4, 0xfdf32800, 0),
+	[QOS_DMA2DDR] = REG_REGION(0x08, 0x18, 4, 0xfdf52000, 0),
+	[QOS_MCU_DDR] = REG_REGION(0x08, 0x18, 4, 0xfdf52200, 0),
+	[QOS_VAD] = REG_REGION(0x08, 0x18, 4, 0xfdf3b200, 0),
+	[QOS_MCU_PMU] = REG_REGION(0x08, 0x18, 4, 0xfdf3b000, 0),
+	[QOS_CRYPTOS] = REG_REGION(0x08, 0x18, 4, 0xfdf3d200, 0),
+	[QOS_CRYPTONS] = REG_REGION(0x08, 0x18, 4, 0xfdf3d000, 0),
+	[QOS_DCF] = REG_REGION(0x08, 0x18, 4, 0xfdf3d400, 0),
+	[QOS_SDMMC] = REG_REGION(0x08, 0x18, 4, 0xfdf3d800, 0),
+};
+
+static struct reg_region pd_crypto_reg_rgns[] = {
+	/* SECURE CRU */
+	REG_REGION(0x300, 0x30c, 4, SCRU_BASE, WMSK_VAL),
+	REG_REGION(0x800, 0x80c, 4, SCRU_BASE, WMSK_VAL),
+	REG_REGION(0xa00, 0xa0c, 4, SCRU_BASE, WMSK_VAL),
+	REG_REGION(0xd00, 0xd20, 8, SCRU_BASE, 0),
+	REG_REGION(0xd04, 0xd24, 8, SCRU_BASE, WMSK_VAL),
+
+	/* S TIMER0 6 channel */
+	REG_REGION(0x00, 0x04, 4, STIMER0_BASE + 0x00, 0),
+	REG_REGION(0x10, 0x10, 4, STIMER0_BASE + 0x00, 0),
+	REG_REGION(0x00, 0x04, 4, STIMER0_BASE + 0x20, 0),
+	REG_REGION(0x10, 0x10, 4, STIMER0_BASE + 0x20, 0),
+	REG_REGION(0x00, 0x04, 4, STIMER0_BASE + 0x40, 0),
+	REG_REGION(0x10, 0x10, 4, STIMER0_BASE + 0x40, 0),
+	REG_REGION(0x00, 0x04, 4, STIMER0_BASE + 0x60, 0),
+	REG_REGION(0x10, 0x10, 4, STIMER0_BASE + 0x60, 0),
+	REG_REGION(0x00, 0x04, 4, STIMER0_BASE + 0x80, 0),
+	REG_REGION(0x10, 0x10, 4, STIMER0_BASE + 0x80, 0),
+	REG_REGION(0x00, 0x04, 4, STIMER0_BASE + 0xa0, 0),
+	REG_REGION(0x10, 0x10, 4, STIMER0_BASE + 0xa0, 0),
+
+	/* S TIMER1 6 channel */
+	REG_REGION(0x00, 0x04, 4, STIMER1_BASE + 0x00, 0),
+	REG_REGION(0x10, 0x10, 4, STIMER1_BASE + 0x00, 0),
+	REG_REGION(0x00, 0x04, 4, STIMER1_BASE + 0x20, 0),
+	REG_REGION(0x10, 0x10, 4, STIMER1_BASE + 0x20, 0),
+	REG_REGION(0x00, 0x04, 4, STIMER1_BASE + 0x40, 0),
+	REG_REGION(0x10, 0x10, 4, STIMER1_BASE + 0x40, 0),
+	REG_REGION(0x00, 0x04, 4, STIMER1_BASE + 0x60, 0),
+	REG_REGION(0x10, 0x10, 4, STIMER1_BASE + 0x60, 0),
+	REG_REGION(0x00, 0x04, 4, STIMER1_BASE + 0x80, 0),
+	REG_REGION(0x10, 0x10, 4, STIMER1_BASE + 0x80, 0),
+	REG_REGION(0x00, 0x04, 4, STIMER1_BASE + 0xa0, 0),
+	REG_REGION(0x10, 0x10, 4, STIMER1_BASE + 0xa0, 0),
+
+	/* wdt_s */
+	REG_REGION(0x04, 0x04, 4, WDT_S_BASE, 0),
+	REG_REGION(0x00, 0x00, 4, WDT_S_BASE, 0),
+};
+
+static struct reg_region pd_dsu_reg_rgns[] = {
+	/* dsucru */
+	REG_REGION(0x040, 0x054, 4, DSUCRU_BASE, WMSK_VAL),
+	REG_REGION(0x300, 0x31c, 4, DSUCRU_BASE, WMSK_VAL),
+	REG_REGION(0x800, 0x80c, 4, DSUCRU_BASE, WMSK_VAL),
+	REG_REGION(0xa00, 0xa0c, 4, DSUCRU_BASE, WMSK_VAL),
+	REG_REGION(0xd00, 0xd20, 8, DSUCRU_BASE, 0),
+	REG_REGION(0xd04, 0xd24, 8, DSUCRU_BASE, WMSK_VAL),
+	REG_REGION(0xf00, 0xf00, 4, DSUCRU_BASE, WMSK_VAL),
+	REG_REGION(0xf10, 0xf1c, 4, DSUCRU_BASE, 0),
+
+	/* bcore0cru */
+	REG_REGION(0x000, 0x014, 4, BIGCORE0CRU_BASE, WMSK_VAL),
+	REG_REGION(0x300, 0x304, 4, BIGCORE0CRU_BASE, WMSK_VAL),
+	REG_REGION(0x800, 0x804, 4, BIGCORE0CRU_BASE, WMSK_VAL),
+	REG_REGION(0xa00, 0xa04, 4, BIGCORE0CRU_BASE, WMSK_VAL),
+	REG_REGION(0xcc0, 0xcc4, 4, BIGCORE0CRU_BASE, 0),
+	REG_REGION(0xd00, 0xd00, 4, BIGCORE0CRU_BASE, 0),
+	REG_REGION(0xd04, 0xd04, 4, BIGCORE0CRU_BASE, WMSK_VAL),
+
+	/* bcore1cru */
+	REG_REGION(0x020, 0x034, 4, BIGCORE1CRU_BASE, WMSK_VAL),
+	REG_REGION(0x300, 0x304, 4, BIGCORE1CRU_BASE, WMSK_VAL),
+	REG_REGION(0x800, 0x804, 4, BIGCORE1CRU_BASE, WMSK_VAL),
+	REG_REGION(0xa00, 0xa04, 4, BIGCORE1CRU_BASE, WMSK_VAL),
+	REG_REGION(0xcc0, 0xcc4, 4, BIGCORE1CRU_BASE, 0),
+	REG_REGION(0xd00, 0xd00, 4, BIGCORE1CRU_BASE, 0),
+	REG_REGION(0xd04, 0xd04, 4, BIGCORE1CRU_BASE, WMSK_VAL),
+
+	/* dsugrf */
+	REG_REGION(0x00, 0x18, 4, DSUGRF_BASE, WMSK_VAL),
+	REG_REGION(0x20, 0x20, 4, DSUGRF_BASE, WMSK_VAL),
+	REG_REGION(0x28, 0x30, 4, DSUGRF_BASE, WMSK_VAL),
+	REG_REGION(0x38, 0x38, 4, DSUGRF_BASE, WMSK_VAL),
+
+	/* lcore_grf */
+	REG_REGION(0x20, 0x20, 4, LITCOREGRF_BASE, WMSK_VAL),
+	REG_REGION(0x28, 0x30, 4, LITCOREGRF_BASE, WMSK_VAL),
+
+	/* bcore0_grf */
+	REG_REGION(0x20, 0x20, 4, BIGCORE0GRF_BASE, WMSK_VAL),
+	REG_REGION(0x28, 0x30, 4, BIGCORE0GRF_BASE, WMSK_VAL),
+
+	/* bcore1_grf */
+	REG_REGION(0x20, 0x20, 4, BIGCORE1GRF_BASE, WMSK_VAL),
+	REG_REGION(0x28, 0x28, 4, BIGCORE1GRF_BASE, WMSK_VAL),
+};
+
+static struct reg_region pd_php_reg_rgns[] = {
+	/* php_grf */
+	REG_REGION(0x000, 0x008, 4, PHPGRF_BASE, WMSK_VAL),
+	REG_REGION(0x014, 0x024, 4, PHPGRF_BASE, WMSK_VAL),
+	REG_REGION(0x028, 0x02c, 4, PHPGRF_BASE, 0),
+	REG_REGION(0x030, 0x03c, 4, PHPGRF_BASE, WMSK_VAL),
+	REG_REGION(0x05c, 0x060, 4, PHPGRF_BASE, WMSK_VAL),
+	REG_REGION(0x064, 0x068, 4, PHPGRF_BASE, 0),
+	REG_REGION(0x070, 0x070, 4, PHPGRF_BASE, WMSK_VAL),
+	REG_REGION(0x074, 0x0d0, 4, PHPGRF_BASE, 0),
+	REG_REGION(0x0d4, 0x0d4, 4, PHPGRF_BASE, WMSK_VAL),
+	REG_REGION(0x0e0, 0x0e0, 4, PHPGRF_BASE, 0),
+	REG_REGION(0x0e4, 0x0ec, 4, PHPGRF_BASE, WMSK_VAL),
+	REG_REGION(0x100, 0x104, 4, PHPGRF_BASE, WMSK_VAL),
+	REG_REGION(0x10c, 0x130, 4, PHPGRF_BASE, 0),
+	REG_REGION(0x138, 0x138, 4, PHPGRF_BASE, WMSK_VAL),
+	REG_REGION(0x144, 0x168, 4, PHPGRF_BASE, 0),
+	REG_REGION(0x16c, 0x174, 4, PHPGRF_BASE, WMSK_VAL),
+
+	/* php_cru */
+	REG_REGION(0x200, 0x218, 4, PHP_CRU_BASE, WMSK_VAL),
+	REG_REGION(0x800, 0x800, 4, PHP_CRU_BASE, WMSK_VAL),
+	REG_REGION(0xa00, 0xa00, 4, PHP_CRU_BASE, WMSK_VAL),
+
+	/* pcie3phy_grf_cmn_con0 */
+	REG_REGION(0x00, 0x00, 4, PCIE3PHYGRF_BASE, WMSK_VAL),
+};
+
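+/*
+ * Save the QoS registers of the masters whose power domains are currently
+ * powered on (the PD_DSU QoS registers are always saved).
+ */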
+void qos_save(void)
+{
+	uint32_t pmu_pd_st0 = mmio_read_32(PMU_BASE + PMU2_PWR_GATE_ST(0));
+
+	if ((pmu_pd_st0 & BIT(PD_GPU)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_GPU_M0], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_GPU_M1], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_GPU_M2], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_GPU_M3], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_NPU1)) == 0)
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_NPU1], 1);
+	if ((pmu_pd_st0 & BIT(PD_NPU2)) == 0)
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_NPU2], 1);
+	if ((pmu_pd_st0 & BIT(PD_NPUTOP)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_NPU0_MRO], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_NPU0_MWR], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_MCU_NPU], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_RKVDEC1)) == 0)
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_RKVDEC1], 1);
+	if ((pmu_pd_st0 & BIT(PD_RKVDEC0)) == 0)
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_RKVDEC0], 1);
+
+	if ((pmu_pd_st0 & BIT(PD_VENC1)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_RKVENC1_M0RO], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_RKVENC1_M1RO], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_RKVENC1_M2WO], 1);
+	}
+	if ((pmu_pd_st0 & BIT(PD_VENC0)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_RKVENC0_M0RO], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_RKVENC0_M1RO], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_RKVENC0_M2WO], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_RGA30)) == 0)
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_RGA3_0], 1);
+	if ((pmu_pd_st0 & BIT(PD_AV1)) == 0)
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_AV1], 1);
+	if ((pmu_pd_st0 & BIT(PD_VDPU)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_JPEG_DEC], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_JPEG_ENC0], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_JPEG_ENC1], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_JPEG_ENC2], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_JPEG_ENC3], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_RGA2_MRO], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_RGA2_MWO], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_VDPU], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_IEP], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_VO0)) == 0)
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_HDCP0], 1);
+	if ((pmu_pd_st0 & BIT(PD_VO1)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_HDCP1], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_HDMIRX], 1);
+	}
+	if ((pmu_pd_st0 & BIT(PD_VOP)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_VOP_M0], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_VOP_M1], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_FEC)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_FISHEYE0], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_FISHEYE1], 1);
+	}
+	if ((pmu_pd_st0 & BIT(PD_ISP1)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_ISP1_MWO], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_ISP1_MRO], 1);
+	}
+	if ((pmu_pd_st0 & BIT(PD_VI)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_ISP0_MWO], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_ISP0_MRO], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_VICAP_M0], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_VICAP_M1], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_RGA31)) == 0)
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_RGA3_1], 1);
+
+	if ((pmu_pd_st0 & BIT(PD_USB)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_USB3_0], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_USB3_1], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_USBHOST_0], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_USBHOST_1], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_PHP)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_GIC600_M0], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_GIC600_M1], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_MMU600PCIE_TCU], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_MMU600PHP_TBU], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_MMU600PHP_TCU], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_SDIO)) == 0)
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_SDIO], 1);
+	if ((pmu_pd_st0 & BIT(PD_NVM0)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_FSPI], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_EMMC], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_SDMMC)) == 0)
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_SDMMC], 1);
+
+	if ((pmu_pd_st0 & BIT(PD_CRYPTO)) == 0) {
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_CRYPTONS], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_CRYPTOS], 1);
+		rockchip_reg_rgn_save(&qos_reg_rgns[QOS_DCF], 1);
+	}
+
+	/* PD_DSU */
+	rockchip_reg_rgn_save(&qos_reg_rgns[QOS_DSU_M0], 1);
+	rockchip_reg_rgn_save(&qos_reg_rgns[QOS_DSU_M1], 1);
+	rockchip_reg_rgn_save(&qos_reg_rgns[QOS_DSU_MP], 1);
+	rockchip_reg_rgn_save(&qos_reg_rgns[QOS_DEBUG], 1);
+}
+
+void qos_restore(void)
+{
+	uint32_t pmu_pd_st0 = mmio_read_32(PMU_BASE + PMU2_PWR_GATE_ST(0));
+
+	if ((pmu_pd_st0 & BIT(PD_GPU)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_GPU_M0], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_GPU_M1], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_GPU_M2], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_GPU_M3], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_NPU1)) == 0)
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_NPU1], 1);
+	if ((pmu_pd_st0 & BIT(PD_NPU2)) == 0)
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_NPU2], 1);
+	if ((pmu_pd_st0 & BIT(PD_NPUTOP)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_NPU0_MRO], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_NPU0_MWR], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_MCU_NPU], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_RKVDEC1)) == 0)
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_RKVDEC1], 1);
+	if ((pmu_pd_st0 & BIT(PD_RKVDEC0)) == 0)
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_RKVDEC0], 1);
+
+	if ((pmu_pd_st0 & BIT(PD_VENC1)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_RKVENC1_M0RO], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_RKVENC1_M1RO], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_RKVENC1_M2WO], 1);
+	}
+	if ((pmu_pd_st0 & BIT(PD_VENC0)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_RKVENC0_M0RO], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_RKVENC0_M1RO], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_RKVENC0_M2WO], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_RGA30)) == 0)
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_RGA3_0], 1);
+	if ((pmu_pd_st0 & BIT(PD_AV1)) == 0)
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_AV1], 1);
+	if ((pmu_pd_st0 & BIT(PD_VDPU)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_JPEG_DEC], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_JPEG_ENC0], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_JPEG_ENC1], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_JPEG_ENC2], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_JPEG_ENC3], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_RGA2_MRO], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_RGA2_MWO], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_VDPU], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_IEP], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_VO0)) == 0)
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_HDCP0], 1);
+	if ((pmu_pd_st0 & BIT(PD_VO1)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_HDCP1], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_HDMIRX], 1);
+	}
+	if ((pmu_pd_st0 & BIT(PD_VOP)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_VOP_M0], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_VOP_M1], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_FEC)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_FISHEYE0], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_FISHEYE1], 1);
+	}
+	if ((pmu_pd_st0 & BIT(PD_ISP1)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_ISP1_MWO], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_ISP1_MRO], 1);
+	}
+	if ((pmu_pd_st0 & BIT(PD_VI)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_ISP0_MWO], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_ISP0_MRO], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_VICAP_M0], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_VICAP_M1], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_RGA31)) == 0)
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_RGA3_1], 1);
+
+	if ((pmu_pd_st0 & BIT(PD_USB)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_USB3_0], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_USB3_1], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_USBHOST_0], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_USBHOST_1], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_PHP)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_GIC600_M0], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_GIC600_M1], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_MMU600PCIE_TCU], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_MMU600PHP_TBU], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_MMU600PHP_TCU], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_SDIO)) == 0)
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_SDIO], 1);
+	if ((pmu_pd_st0 & BIT(PD_NVM0)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_FSPI], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_EMMC], 1);
+	}
+
+	if ((pmu_pd_st0 & BIT(PD_SDMMC)) == 0)
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_SDMMC], 1);
+
+	if ((pmu_pd_st0 & BIT(PD_CRYPTO)) == 0) {
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_CRYPTONS], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_CRYPTOS], 1);
+		rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_DCF], 1);
+	}
+
+	/* PD_DSU */
+	rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_DSU_M0], 1);
+	rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_DSU_M1], 1);
+	rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_DSU_MP], 1);
+	rockchip_reg_rgn_restore(&qos_reg_rgns[QOS_DEBUG], 1);
+}
+
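+/*
+ * Save the secure CRU, secure timer and secure watchdog registers before
+ * PD_CRYPTO is powered down.
+ */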
+void pd_crypto_save(void)
+{
+	rockchip_reg_rgn_save(pd_crypto_reg_rgns, ARRAY_SIZE(pd_crypto_reg_rgns));
+}
+
+void pd_crypto_restore(void)
+{
+	rockchip_reg_rgn_restore(pd_crypto_reg_rgns, ARRAY_SIZE(pd_crypto_reg_rgns));
+}
+
+static uint32_t b0_cru_mode;
+static uint32_t b1_cru_mode;
+static uint32_t dsu_cru_mode;
+static uint32_t bcore0_cru_sel_con2, bcore1_cru_sel_con2;
+
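+/*
+ * Save the big-core/DSU CRU mode and clksel registers together with the
+ * DSU and core CRU/GRF regions before system suspend.
+ */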
+void pd_dsu_core_save(void)
+{
+	b0_cru_mode = mmio_read_32(BIGCORE0CRU_BASE + 0x280);
+	b1_cru_mode = mmio_read_32(BIGCORE1CRU_BASE + 0x280);
+	dsu_cru_mode = mmio_read_32(DSUCRU_BASE + 0x280);
+	bcore0_cru_sel_con2 = mmio_read_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(2));
+	bcore1_cru_sel_con2 = mmio_read_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(2));
+
+	rockchip_reg_rgn_save(pd_dsu_reg_rgns, ARRAY_SIZE(pd_dsu_reg_rgns));
+}
+
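+/*
+ * Restore the DSU/core CRU and GRF registers: switch the big-core pclk
+ * roots to 24M and force slow mode first, restore the saved regions,
+ * re-trigger the memory configuration, wait for the PLLs to lock, then
+ * restore the original mode and clksel settings.
+ */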
+void pd_dsu_core_restore(void)
+{
+	/* switch bcore0/1 pclk root to 24M */
+	mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(2),
+		      BITS_WITH_WMASK(2, 0x3, 0));
+	mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(2),
+		      BITS_WITH_WMASK(2, 0x3, 0));
+
+	/* slow mode */
+	mmio_write_32(BIGCORE0CRU_BASE + 0x280, 0x00030000);
+	mmio_write_32(BIGCORE1CRU_BASE + 0x280, 0x00030000);
+	mmio_write_32(DSUCRU_BASE + 0x280, 0x00030000);
+
+	rockchip_reg_rgn_restore(pd_dsu_reg_rgns, ARRAY_SIZE(pd_dsu_reg_rgns));
+
+	/* trigger dsu/lcore/bcore mem_cfg */
+	mmio_write_32(DSUGRF_BASE + 0x18, BITS_WITH_WMASK(1, 0x1, 14));
+	mmio_write_32(LITCOREGRF_BASE + 0x30, BITS_WITH_WMASK(1, 0x1, 5));
+	mmio_write_32(BIGCORE0GRF_BASE + 0x30, BITS_WITH_WMASK(1, 0x1, 5));
+	mmio_write_32(BIGCORE1GRF_BASE + 0x30, BITS_WITH_WMASK(1, 0x1, 5));
+	udelay(1);
+	mmio_write_32(DSUGRF_BASE + 0x18, BITS_WITH_WMASK(0, 0x1, 14));
+	mmio_write_32(LITCOREGRF_BASE + 0x30, BITS_WITH_WMASK(0, 0x1, 5));
+	mmio_write_32(BIGCORE0GRF_BASE + 0x30, BITS_WITH_WMASK(0, 0x1, 5));
+	mmio_write_32(BIGCORE1GRF_BASE + 0x30, BITS_WITH_WMASK(0, 0x1, 5));
+
+	/* wait lock */
+	pm_pll_wait_lock(BIGCORE0CRU_BASE + 0x00);
+	pm_pll_wait_lock(BIGCORE1CRU_BASE + 0x20);
+	pm_pll_wait_lock(DSUCRU_BASE + 0x40);
+
+	/* restore mode */
+	mmio_write_32(BIGCORE0CRU_BASE + 0x280, WITH_16BITS_WMSK(b0_cru_mode));
+	mmio_write_32(BIGCORE1CRU_BASE + 0x280, WITH_16BITS_WMSK(b1_cru_mode));
+	mmio_write_32(DSUCRU_BASE + 0x280, WITH_16BITS_WMSK(dsu_cru_mode));
+
+	mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(2),
+		      WITH_16BITS_WMSK(bcore0_cru_sel_con2));
+	mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(2),
+		      WITH_16BITS_WMSK(bcore1_cru_sel_con2));
+}
+
+static uint32_t php_ppll_con0;
+
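+/*
+ * Bypass the PHP PLL, then save the PHP GRF/CRU and the PCIe3 PHY GRF
+ * registers.
+ */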
+void pd_php_save(void)
+{
+	php_ppll_con0 = mmio_read_32(PHP_CRU_BASE + 0x200);
+
+	/* php_ppll bypass */
+	mmio_write_32(PHP_CRU_BASE + 0x200, BITS_WITH_WMASK(1u, 1u, 15));
+	dsb();
+	isb();
+	rockchip_reg_rgn_save(pd_php_reg_rgns, ARRAY_SIZE(pd_php_reg_rgns));
+}
+
+void pd_php_restore(void)
+{
+	rockchip_reg_rgn_restore(pd_php_reg_rgns, ARRAY_SIZE(pd_php_reg_rgns));
+
+	pm_pll_wait_lock(PHP_CRU_BASE + 0x200);
+
+	/* restore php_ppll bypass */
+	mmio_write_32(PHP_CRU_BASE + 0x200, WITH_16BITS_WMSK(php_ppll_con0));
+}
+
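+/*
+ * Allocate the backup memory for all of the register regions above. Called
+ * once from plat_rockchip_pmu_init() before the save/restore helpers are
+ * used.
+ */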
+void pm_reg_rgns_init(void)
+{
+	rockchip_alloc_region_mem(qos_reg_rgns, ARRAY_SIZE(qos_reg_rgns));
+	rockchip_alloc_region_mem(pd_crypto_reg_rgns, ARRAY_SIZE(pd_crypto_reg_rgns));
+	rockchip_alloc_region_mem(pd_dsu_reg_rgns, ARRAY_SIZE(pd_dsu_reg_rgns));
+	rockchip_alloc_region_mem(pd_php_reg_rgns, ARRAY_SIZE(pd_php_reg_rgns));
+}
diff --git a/plat/rockchip/rk3588/drivers/pmu/pm_pd_regs.h b/plat/rockchip/rk3588/drivers/pmu/pm_pd_regs.h
new file mode 100644
index 0000000..8baf69a
--- /dev/null
+++ b/plat/rockchip/rk3588/drivers/pmu/pm_pd_regs.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PM_PD_REGS_H
+#define PM_PD_REGS_H
+
+#include <stdint.h>
+
+void qos_save(void);
+void qos_restore(void);
+void pd_crypto_save(void);
+void pd_crypto_restore(void);
+void pd_dsu_core_save(void);
+void pd_dsu_core_restore(void);
+void pd_php_save(void);
+void pd_php_restore(void);
+
+void pm_reg_rgns_init(void);
+
+#endif /* PM_PD_REGS_H */
diff --git a/plat/rockchip/rk3588/drivers/pmu/pmu.c b/plat/rockchip/rk3588/drivers/pmu/pmu.c
new file mode 100644
index 0000000..83d6cad
--- /dev/null
+++ b/plat/rockchip/rk3588/drivers/pmu/pmu.c
@@ -0,0 +1,1439 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <common/debug.h>
+#include <drivers/arm/gicv3.h>
+#include <drivers/console.h>
+#include <drivers/delay_timer.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/mmio.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+#include <pmu.h>
+
+#include <cpus_on_fixed_addr.h>
+#include <plat_pm_helpers.h>
+#include <plat_private.h>
+#include <pm_pd_regs.h>
+#include <rk3588_clk.h>
+#include <rockchip_sip_svc.h>
+#include <secure.h>
+#include <soc.h>
+
+#define PSRAM_SP_TOP	((PMUSRAM_BASE + PMUSRAM_RSIZE) & ~0xf)
+#define NONBOOT_CPUS_OFF_LOOP (500000)
+
+#define DSUGRF_REG_CNT			(0x78 / 4 + 1)
+#define BCORE_GRF_REG_CNT		(0x30 / 4 + 1)
+#define LCORE_GRF_REG_CNT		(0x30 / 4 + 1)
+
+#define CENTER_GRF_REG_CNT		(0x20 / 4 + 1)
+
+static struct psram_data_t *psram_sleep_cfg =
+	(struct psram_data_t *)&sys_sleep_flag_sram;
+
+static int8_t pd_repair_map[] = {
+	[PD_GPU] = PD_RPR_GPU,
+	[PD_NPU] = -1,
+	[PD_VCODEC] = -1,
+	[PD_NPUTOP] = PD_RPR_NPUTOP,
+	[PD_NPU1] = PD_RPR_NPU1,
+	[PD_NPU2] = PD_RPR_NPU2,
+	[PD_VENC0] = PD_RPR_VENC0,
+	[PD_VENC1] = PD_RPR_VENC1,
+	[PD_RKVDEC0] = PD_RPR_RKVDEC0,
+	[PD_RKVDEC1] = PD_RPR_RKVDEC1,
+	[PD_VDPU] = PD_RPR_VDPU,
+	[PD_RGA30] = PD_RPR_RGA30,
+	[PD_AV1] = PD_RPR_AV1,
+	[PD_VI] = PD_RPR_VI,
+	[PD_FEC] = PD_RPR_FEC,
+	[PD_ISP1] = PD_RPR_ISP1,
+	[PD_RGA31] = PD_RPR_RGA31,
+	[PD_VOP] = PD_RPR_VOP,
+	[PD_VO0] = PD_RPR_VO0,
+	[PD_VO1] = PD_RPR_VO1,
+	[PD_AUDIO] = PD_RPR_AUDIO,
+	[PD_PHP] = PD_RPR_PHP,
+	[PD_GMAC] = PD_RPR_GMAC,
+	[PD_PCIE] = PD_RPR_PCIE,
+	[PD_NVM] = -1,
+	[PD_NVM0] = PD_RPR_NVM0,
+	[PD_SDIO] = PD_RPR_SDIO,
+	[PD_USB] = PD_RPR_USB,
+	[PD_SECURE] = -1,
+	[PD_SDMMC] = PD_RPR_SDMMC,
+	[PD_CRYPTO] = PD_RPR_CRYPTO,
+	[PD_CENTER] = PD_RPR_CENTER,
+	[PD_DDR01] = PD_RPR_DDR01,
+	[PD_DDR23] = PD_RPR_DDR23,
+};
+
+struct rk3588_sleep_ddr_data {
+	uint32_t gpio0a_iomux_l, gpio0a_iomux_h, gpio0b_iomux_l;
+	uint32_t pmu_pd_st0, bus_idle_st0, qch_pwr_st;
+	uint32_t pmu2_vol_gate_con[3], pmu2_submem_gate_sft_con0;
+	uint32_t pmu2_bisr_con0;
+	uint32_t cpll_con0;
+	uint32_t cru_mode_con, busscru_mode_con;
+	uint32_t bussgrf_soc_con7;
+	uint32_t pmu0grf_soc_con0, pmu0grf_soc_con1, pmu0grf_soc_con3;
+	uint32_t pmu1grf_soc_con2, pmu1grf_soc_con7, pmu1grf_soc_con8, pmu1grf_soc_con9;
+	uint32_t pmu0sgrf_soc_con1;
+	uint32_t pmu1sgrf_soc_con14;
+	uint32_t ddrgrf_chn_con0[4], ddrgrf_chn_con1[4],
+		ddrgrf_chn_con2[4], pmu1_ddr_pwr_sft_con[4];
+	uint32_t pmu1cru_clksel_con1;
+};
+
+static struct rk3588_sleep_ddr_data ddr_data;
+
+struct rk3588_sleep_pmusram_data {
+	uint32_t dsusgrf_soc_con[DSUSGRF_SOC_CON_CNT],
+		dsusgrf_ddr_hash_con[DSUSGRF_DDR_HASH_CON_CNT];
+	uint32_t dsu_ddr_fw_rgn_reg[FIREWALL_DSU_RGN_CNT],
+		dsu_ddr_fw_mst_reg[FIREWALL_DSU_MST_CNT],
+		dsu_ddr_fw_con_reg[FIREWALL_DSU_CON_CNT];
+	uint32_t busioc_gpio0b_iomux_h;
+};
+
+static __pmusramdata struct rk3588_sleep_pmusram_data pmusram_data;
+
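+/*
+ * Restore the DSU SGRF, DDR hash and DSU DDR firewall registers from the
+ * copy kept in PMUSRAM. Runs from PMUSRAM during warm resume via the
+ * ddr_func hook in psram_sleep_cfg (see ddr_resume() below).
+ */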
+static __pmusramfunc void dsu_restore_early(void)
+{
+	int i;
+
+	/* dsusgrf */
+	for (i = 0; i < DSUSGRF_SOC_CON_CNT; i++)
+		mmio_write_32(DSUSGRF_BASE + DSUSGRF_SOC_CON(i),
+			      WITH_16BITS_WMSK(pmusram_data.dsusgrf_soc_con[i]));
+
+	for (i = 0; i < DSUSGRF_DDR_HASH_CON_CNT; i++)
+		mmio_write_32(DSUSGRF_BASE + DSUSGRF_DDR_HASH_CON(i),
+			      pmusram_data.dsusgrf_ddr_hash_con[i]);
+
+	/* dsu ddr firewall */
+	for (i = 0; i < FIREWALL_DSU_RGN_CNT; i++)
+		mmio_write_32(FIREWALL_DSU_BASE + FIREWALL_DSU_RGN(i),
+			      pmusram_data.dsu_ddr_fw_rgn_reg[i]);
+
+	for (i = 0; i < FIREWALL_DSU_MST_CNT; i++)
+		mmio_write_32(FIREWALL_DSU_BASE + FIREWALL_DSU_MST(i),
+			      pmusram_data.dsu_ddr_fw_mst_reg[i]);
+
+	for (i = 0; i < FIREWALL_DSU_CON_CNT; i++)
+		mmio_write_32(FIREWALL_DSU_BASE + FIREWALL_DSU_CON(i),
+			      pmusram_data.dsu_ddr_fw_con_reg[i]);
+}
+
+static __pmusramfunc void ddr_resume(void)
+{
+	dsu_restore_early();
+}
+
+static void dsu_core_save(void)
+{
+	int i;
+
+	/* dsusgrf */
+	for (i = 0; i < DSUSGRF_SOC_CON_CNT; i++)
+		pmusram_data.dsusgrf_soc_con[i] =
+			mmio_read_32(DSUSGRF_BASE + DSUSGRF_SOC_CON(i));
+
+	for (i = 0; i < DSUSGRF_DDR_HASH_CON_CNT; i++)
+		pmusram_data.dsusgrf_ddr_hash_con[i] =
+			mmio_read_32(DSUSGRF_BASE + DSUSGRF_DDR_HASH_CON(i));
+
+	/* dsu ddr firewall */
+	for (i = 0; i < FIREWALL_DSU_RGN_CNT; i++)
+		pmusram_data.dsu_ddr_fw_rgn_reg[i] =
+			mmio_read_32(FIREWALL_DSU_BASE + FIREWALL_DSU_RGN(i));
+
+	for (i = 0; i < FIREWALL_DSU_MST_CNT; i++)
+		pmusram_data.dsu_ddr_fw_mst_reg[i] =
+			mmio_read_32(FIREWALL_DSU_BASE + FIREWALL_DSU_MST(i));
+
+	for (i = 0; i < FIREWALL_DSU_CON_CNT; i++)
+		pmusram_data.dsu_ddr_fw_con_reg[i] =
+			mmio_read_32(FIREWALL_DSU_BASE + FIREWALL_DSU_CON(i));
+
+	pvtplls_suspend();
+	pd_dsu_core_save();
+}
+
+static void dsu_core_restore(void)
+{
+	pd_dsu_core_restore();
+	pvtplls_resume();
+}
+
+static uint32_t clk_save[CRU_CLKGATE_CON_CNT + PHPCRU_CLKGATE_CON_CNT +
+			 SECURECRU_CLKGATE_CON_CNT + PMU1CRU_CLKGATE_CON_CNT];
+
+void clk_gate_con_save(void)
+{
+	int i, j = 0;
+
+	for (i = 0; i < CRU_CLKGATE_CON_CNT; i++, j++)
+		clk_save[j] = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(i));
+
+	clk_save[j++] = mmio_read_32(PHP_CRU_BASE + PHPCRU_CLKGATE_CON);
+
+	for (i = 0; i < SECURECRU_CLKGATE_CON_CNT; i++, j++)
+		clk_save[j] = mmio_read_32(SCRU_BASE + SECURECRU_CLKGATE_CON(i));
+
+	for (i = 0; i < PMU1CRU_CLKGATE_CON_CNT; i++, j++)
+		clk_save[j] = mmio_read_32(PMU1CRU_BASE + CRU_CLKGATE_CON(i));
+}
+
+void clk_gate_con_disable(void)
+{
+	int i;
+
+	for (i = 0; i < CRU_CLKGATE_CON_CNT; i++)
+		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i), 0xffff0000);
+
+	mmio_write_32(PHP_CRU_BASE + PHPCRU_CLKGATE_CON, 0xffff0000);
+
+	for (i = 0; i < SECURECRU_CLKGATE_CON_CNT; i++)
+		mmio_write_32(SCRU_BASE + SECURECRU_CLKGATE_CON(i), 0xffff0000);
+
+	for (i = 0; i < PMU1CRU_CLKGATE_CON_CNT; i++)
+		mmio_write_32(PMU1CRU_BASE + CRU_CLKGATE_CON(i), 0xffff0000);
+}
+
+void clk_gate_con_restore(void)
+{
+	int i, j = 0;
+
+	for (i = 0; i < CRU_CLKGATE_CON_CNT; i++, j++)
+		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
+			      WITH_16BITS_WMSK(clk_save[j]));
+
+	mmio_write_32(PHP_CRU_BASE + PHPCRU_CLKGATE_CON,
+		      WITH_16BITS_WMSK(clk_save[j++]));
+
+	for (i = 0; i < SECURECRU_CLKGATE_CON_CNT; i++, j++)
+		mmio_write_32(SCRU_BASE + SECURECRU_CLKGATE_CON(i),
+			      WITH_16BITS_WMSK(clk_save[j]));
+
+	for (i = 0; i < PMU1CRU_CLKGATE_CON_CNT; i++, j++)
+		mmio_write_32(PMU1CRU_BASE + CRU_CLKGATE_CON(i),
+			      WITH_16BITS_WMSK(clk_save[j]));
+}
+
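+/*
+ * Request the given bus NIU to enter or exit the idle state and poll the
+ * idle status and ack bits until they match, warning on timeout.
+ */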
+static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
+{
+	uint32_t wait_cnt = 0;
+
+	mmio_write_32(PMU_BASE + PMU2_BUS_IDLE_SFTCON(bus / 16),
+		      BITS_WITH_WMASK(state, 0x1, bus % 16));
+
+	while (pmu_bus_idle_st(bus) != state ||
+	       pmu_bus_idle_ack(bus) != state) {
+		if (++wait_cnt > BUS_IDLE_LOOP)
+			break;
+		udelay(1);
+	}
+
+	if (wait_cnt > BUS_IDLE_LOOP)
+		WARN("%s: can't wait state %d for bus %d (0x%x)\n",
+		     __func__, state, bus,
+		     mmio_read_32(PMU_BASE + PMU2_BUS_IDLE_ST(bus / 32)));
+}
+
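+/*
+ * Drive the Q-channel power control bits selected by 'msk' to the requested
+ * state and poll the Q-channel status until it matches, warning on timeout.
+ */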
+static void pmu_qch_pwr_ctlr(uint32_t msk, uint32_t state)
+{
+	uint32_t wait_cnt = 0;
+
+	if (state != 0)
+		state = msk;
+
+	mmio_write_32(PMU_BASE + PMU2_QCHANNEL_PWR_SFTCON,
+		      BITS_WITH_WMASK(state, msk, 0));
+
+	while ((mmio_read_32(PMU_BASE + PMU2_QCHANNEL_STATUS) & msk) != state) {
+		if (++wait_cnt > QCH_PWR_LOOP)
+			break;
+		udelay(1);
+	}
+
+	if (wait_cnt > QCH_PWR_LOOP)
+		WARN("%s: can't wait qch:0x%x to state:0x%x (0x%x)\n",
+		     __func__, msk, state,
+		     mmio_read_32(PMU_BASE + PMU2_QCHANNEL_STATUS));
+}
+
+static inline uint32_t pmu_power_domain_chain_st(uint32_t pd)
+{
+	return mmio_read_32(PMU_BASE + PMU2_PWR_CHAIN1_ST(pd / 32)) & BIT(pd % 32) ?
+	       pmu_pd_on :
+	       pmu_pd_off;
+}
+
+static inline uint32_t pmu_power_domain_mem_st(uint32_t pd)
+{
+	return mmio_read_32(PMU_BASE + PMU2_PWR_MEM_ST(pd / 32)) & BIT(pd % 32) ?
+	       pmu_pd_off :
+	       pmu_pd_on;
+}
+
+static inline uint32_t pmu_power_domain_st(uint32_t pd)
+{
+	int8_t pd_repair = pd_repair_map[pd];
+
+	if (pd_repair >= 0)
+		return mmio_read_32(PMU_BASE + PMU2_BISR_STATUS(4)) & BIT(pd_repair) ?
+		       pmu_pd_on :
+		       pmu_pd_off;
+	else
+		return mmio_read_32(PMU_BASE + PMU2_PWR_GATE_ST(pd / 32)) & BIT(pd % 32) ?
+		       pmu_pd_off :
+		       pmu_pd_on;
+}
+
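+/*
+ * Map a power domain ID to the bit index used in the PMU2 power chain and
+ * memory power state registers; returns -EINVAL for domains without one.
+ */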
+static int pmu_power_domain_pd_to_mem_st(uint32_t pd, uint32_t *pd_mem_st)
+{
+	uint32_t mem_st;
+
+	switch (pd) {
+	case PD_NPUTOP:
+		mem_st = PD_NPU_TOP_MEM_ST;
+		break;
+	case PD_NPU1:
+		mem_st = PD_NPU1_MEM_ST;
+		break;
+	case PD_NPU2:
+		mem_st = PD_NPU2_MEM_ST;
+		break;
+	case PD_VENC0:
+		mem_st = PD_VENC0_MEM_ST;
+		break;
+	case PD_VENC1:
+		mem_st = PD_VENC1_MEM_ST;
+		break;
+	case PD_RKVDEC0:
+		mem_st = PD_RKVDEC0_MEM_ST;
+		break;
+	case PD_RKVDEC1:
+		mem_st = PD_RKVDEC1_MEM_ST;
+		break;
+	case PD_RGA30:
+		mem_st = PD_RGA30_MEM_ST;
+		break;
+	case PD_AV1:
+		mem_st = PD_AV1_MEM_ST;
+		break;
+	case PD_VI:
+		mem_st = PD_VI_MEM_ST;
+		break;
+	case PD_FEC:
+		mem_st = PD_FEC_MEM_ST;
+		break;
+	case PD_ISP1:
+		mem_st = PD_ISP1_MEM_ST;
+		break;
+	case PD_RGA31:
+		mem_st = PD_RGA31_MEM_ST;
+		break;
+	case PD_VOP:
+		mem_st = PD_VOP_MEM_ST;
+		break;
+	case PD_VO0:
+		mem_st = PD_VO0_MEM_ST;
+		break;
+	case PD_VO1:
+		mem_st = PD_VO1_MEM_ST;
+		break;
+	case PD_AUDIO:
+		mem_st = PD_AUDIO_MEM_ST;
+		break;
+	case PD_PHP:
+		mem_st = PD_PHP_MEM_ST;
+		break;
+	case PD_GMAC:
+		mem_st = PD_GMAC_MEM_ST;
+		break;
+	case PD_PCIE:
+		mem_st = PD_PCIE_MEM_ST;
+		break;
+	case PD_NVM0:
+		mem_st = PD_NVM0_MEM_ST;
+		break;
+	case PD_SDIO:
+		mem_st = PD_SDIO_MEM_ST;
+		break;
+	case PD_USB:
+		mem_st = PD_USB_MEM_ST;
+		break;
+	case PD_SDMMC:
+		mem_st = PD_SDMMC_MEM_ST;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*pd_mem_st = mem_st;
+
+	return 0;
+}
+
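+/*
+ * Power-cycle the memory of a power domain: wait for its power chain to come
+ * up, then switch the memory off and back on, polling the memory state at
+ * each step.
+ */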
+static int pmu_power_domain_reset_mem(uint32_t pd, uint32_t pd_mem_st)
+{
+	uint32_t loop = 0;
+	int ret = 0;
+
+	while (pmu_power_domain_chain_st(pd_mem_st) != pmu_pd_on) {
+		udelay(1);
+		loop++;
+		if (loop >= PD_CTR_LOOP) {
+			WARN("%s: %d chain up timeout\n", __func__, pd);
+			ret = -EINVAL;
+			goto error;
+		}
+	}
+
+	udelay(60);
+
+	mmio_write_32(PMU_BASE + PMU2_MEMPWR_GATE_SFTCON(pd / 16),
+		      BITS_WITH_WMASK(pmu_pd_off, 0x1, pd % 16));
+	dsb();
+
+	loop = 0;
+	while (pmu_power_domain_mem_st(pd_mem_st) != pmu_pd_off) {
+		udelay(1);
+		loop++;
+		if (loop >= PD_CTR_LOOP) {
+			WARN("%s: %d mem down timeout\n", __func__, pd);
+			ret = -EINVAL;
+			goto error;
+		}
+	}
+
+	mmio_write_32(PMU_BASE + PMU2_MEMPWR_GATE_SFTCON(pd / 16),
+		      BITS_WITH_WMASK(pmu_pd_on, 0x1, pd % 16));
+	dsb();
+
+	loop = 0;
+	while (pmu_power_domain_mem_st(pd_mem_st) != pmu_pd_on) {
+		udelay(1);
+		loop++;
+		if (loop >= PD_CTR_LOOP) {
+			WARN("%s: %d mem up timeout\n", __func__, pd);
+			ret = -EINVAL;
+			goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	return ret;
+}
+
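+/*
+ * Switch a power domain on or off via the PMU2 power gate and poll until it
+ * reaches the requested state. If the domain memory is found already powered
+ * up when switching on, it is power-cycled after the gate is set.
+ */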
+static int pmu_power_domain_ctr(uint32_t pd, uint32_t pd_state)
+{
+	uint32_t loop = 0;
+	uint32_t is_mem_on = pmu_pd_off;
+	uint32_t pd_mem_st;
+	int ret = 0;
+
+	if (pd_state == pmu_pd_on) {
+		ret = pmu_power_domain_pd_to_mem_st(pd, &pd_mem_st);
+		if (ret == 0) {
+			is_mem_on = pmu_power_domain_mem_st(pd_mem_st);
+			if (is_mem_on == pmu_pd_on)
+				WARN("%s: %d mem is up\n", __func__, pd);
+		}
+	}
+
+	mmio_write_32(PMU_BASE + PMU2_PWR_GATE_SFTCON(pd / 16),
+		      BITS_WITH_WMASK(pd_state, 0x1, pd % 16));
+	dsb();
+
+	if (is_mem_on == pmu_pd_on) {
+		ret = pmu_power_domain_reset_mem(pd, pd_mem_st);
+		if (ret != 0)
+			goto out;
+		WARN("%s: %d mem reset ok\n", __func__, pd);
+	}
+
+	while ((pmu_power_domain_st(pd) != pd_state) && (loop < PD_CTR_LOOP)) {
+		udelay(1);
+		loop++;
+	}
+
+	if (pmu_power_domain_st(pd) != pd_state) {
+		WARN("%s: %d, %d, (0x%x, 0x%x) error!\n", __func__, pd, pd_state,
+		     mmio_read_32(PMU_BASE + PMU2_PWR_GATE_ST(0)),
+		     mmio_read_32(PMU_BASE + PMU2_BISR_STATUS(4)));
+		ret = -EINVAL;
+	}
+
+out:
+	return ret;
+}
+
+static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
+{
+	uint32_t state;
+
+	if (pmu_power_domain_st(pd_id) == pd_state)
+		goto out;
+
+	if (pd_state == pmu_pd_on)
+		pmu_power_domain_ctr(pd_id, pd_state);
+
+	state = (pd_state == pmu_pd_off) ? bus_idle : bus_active;
+
+	switch (pd_id) {
+	case PD_GPU:
+		pmu_bus_idle_req(BUS_ID_GPU, state);
+		break;
+	case PD_NPUTOP:
+		pmu_bus_idle_req(BUS_ID_NPUTOP, state);
+		break;
+	case PD_NPU1:
+		pmu_bus_idle_req(BUS_ID_NPU1, state);
+		break;
+	case PD_NPU2:
+		pmu_bus_idle_req(BUS_ID_NPU2, state);
+		break;
+	case PD_VENC0:
+		pmu_bus_idle_req(BUS_ID_RKVENC0, state);
+		break;
+	case PD_VENC1:
+		pmu_bus_idle_req(BUS_ID_RKVENC1, state);
+		break;
+	case PD_RKVDEC0:
+		pmu_bus_idle_req(BUS_ID_RKVDEC0, state);
+		break;
+	case PD_RKVDEC1:
+		pmu_bus_idle_req(BUS_ID_RKVDEC1, state);
+		break;
+	case PD_VDPU:
+		pmu_bus_idle_req(BUS_ID_VDPU, state);
+		break;
+	case PD_AV1:
+		pmu_bus_idle_req(BUS_ID_AV1, state);
+		break;
+	case PD_VI:
+		pmu_bus_idle_req(BUS_ID_VI, state);
+		break;
+	case PD_ISP1:
+		pmu_bus_idle_req(BUS_ID_ISP, state);
+		break;
+	case PD_RGA31:
+		pmu_bus_idle_req(BUS_ID_RGA31, state);
+		break;
+	case PD_VOP:
+		pmu_bus_idle_req(BUS_ID_VOP_CHANNEL, state);
+		pmu_bus_idle_req(BUS_ID_VOP, state);
+		break;
+	case PD_VO0:
+		pmu_bus_idle_req(BUS_ID_VO0, state);
+		break;
+	case PD_VO1:
+		pmu_bus_idle_req(BUS_ID_VO1, state);
+		break;
+	case PD_AUDIO:
+		pmu_bus_idle_req(BUS_ID_AUDIO, state);
+		break;
+	case PD_PHP:
+		pmu_bus_idle_req(BUS_ID_PHP, state);
+		break;
+	case PD_NVM:
+		pmu_bus_idle_req(BUS_ID_NVM, state);
+		break;
+	case PD_SDIO:
+		pmu_bus_idle_req(BUS_ID_SDIO, state);
+		break;
+	case PD_USB:
+		pmu_bus_idle_req(BUS_ID_USB, state);
+		break;
+	case PD_SECURE:
+		pmu_bus_idle_req(BUS_ID_SECURE, state);
+		break;
+	default:
+		break;
+	}
+
+	if (pd_state == pmu_pd_off)
+		pmu_power_domain_ctr(pd_id, pd_state);
+
+out:
+	return 0;
+}
+
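+/*
+ * Save the QoS, power domain and bus idle status, idle the Q-channels and
+ * the VO1/USB buses, then power off all the power domains that are not
+ * needed while the system is suspended.
+ */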
+static void pmu_power_domains_suspend(void)
+{
+	ddr_data.qch_pwr_st =
+		mmio_read_32(PMU_BASE + PMU2_QCHANNEL_STATUS) & PMU2_QCH_PWR_MSK;
+	ddr_data.pmu_pd_st0 = mmio_read_32(PMU_BASE + PMU2_PWR_GATE_ST(0));
+	ddr_data.bus_idle_st0 = mmio_read_32(PMU_BASE + PMU2_BUS_IDLE_ST(0));
+
+	qos_save();
+
+	if ((ddr_data.pmu_pd_st0 & BIT(PD_PHP)) == 0)
+		pd_php_save();
+
+	if ((ddr_data.pmu_pd_st0 & BIT(PD_CRYPTO)) == 0)
+		pd_crypto_save();
+
+	pmu_qch_pwr_ctlr(0x20, 1);
+	pmu_qch_pwr_ctlr(0x40, 1);
+	pmu_qch_pwr_ctlr(0x1, 1);
+	pmu_qch_pwr_ctlr(0x2, 1);
+	pmu_qch_pwr_ctlr(0x4, 1);
+	pmu_qch_pwr_ctlr(0x8, 1);
+	pmu_qch_pwr_ctlr(0x10, 1);
+
+	pmu_bus_idle_req(BUS_ID_VO1USBTOP, bus_idle);
+	pmu_bus_idle_req(BUS_ID_SECURE_VO1USB_CHANNEL, bus_idle);
+
+	pmu_bus_idle_req(BUS_ID_USB, bus_idle);
+
+	pmu_set_power_domain(PD_GPU, pmu_pd_off);
+
+	pmu_set_power_domain(PD_NPU1, pmu_pd_off);
+	pmu_set_power_domain(PD_NPU2, pmu_pd_off);
+	pmu_set_power_domain(PD_NPUTOP, pmu_pd_off);
+	pmu_set_power_domain(PD_NPU, pmu_pd_off);
+
+	pmu_set_power_domain(PD_RKVDEC1, pmu_pd_off);
+	pmu_set_power_domain(PD_RKVDEC0, pmu_pd_off);
+	pmu_set_power_domain(PD_VENC1, pmu_pd_off);
+	pmu_set_power_domain(PD_VENC0, pmu_pd_off);
+	pmu_set_power_domain(PD_VCODEC, pmu_pd_off);
+
+	pmu_set_power_domain(PD_RGA30, pmu_pd_off);
+	pmu_set_power_domain(PD_AV1, pmu_pd_off);
+	pmu_set_power_domain(PD_VDPU, pmu_pd_off);
+
+	pmu_set_power_domain(PD_VO0, pmu_pd_off);
+	pmu_set_power_domain(PD_VO1, pmu_pd_off);
+	pmu_set_power_domain(PD_VOP, pmu_pd_off);
+
+	pmu_set_power_domain(PD_FEC, pmu_pd_off);
+	pmu_set_power_domain(PD_ISP1, pmu_pd_off);
+	pmu_set_power_domain(PD_VI, pmu_pd_off);
+
+	pmu_set_power_domain(PD_RGA31, pmu_pd_off);
+
+	pmu_set_power_domain(PD_AUDIO, pmu_pd_off);
+
+	pmu_set_power_domain(PD_GMAC, pmu_pd_off);
+	pmu_set_power_domain(PD_PCIE, pmu_pd_off);
+	pmu_set_power_domain(PD_PHP, pmu_pd_off);
+
+	pmu_set_power_domain(PD_SDIO, pmu_pd_off);
+
+	pmu_set_power_domain(PD_NVM0, pmu_pd_off);
+	pmu_set_power_domain(PD_NVM, pmu_pd_off);
+
+	pmu_set_power_domain(PD_SDMMC, pmu_pd_off);
+	pmu_set_power_domain(PD_CRYPTO, pmu_pd_off);
+}
+
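+/*
+ * Reverse of pmu_power_domains_suspend(): bring the power domains, bus idle
+ * and Q-channel state back to what was saved at suspend time.
+ */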
+static void pmu_power_domains_resume(void)
+{
+	int i;
+
+	pmu_set_power_domain(PD_CRYPTO, !!(ddr_data.pmu_pd_st0 & BIT(PD_CRYPTO)));
+	pmu_set_power_domain(PD_SDMMC, !!(ddr_data.pmu_pd_st0 & BIT(PD_SDMMC)));
+
+	pmu_set_power_domain(PD_NVM, !!(ddr_data.pmu_pd_st0 & BIT(PD_NVM)));
+	pmu_set_power_domain(PD_NVM0, !!(ddr_data.pmu_pd_st0 & BIT(PD_NVM0)));
+
+	pmu_set_power_domain(PD_SDIO, !!(ddr_data.pmu_pd_st0 & BIT(PD_SDIO)));
+
+	pmu_set_power_domain(PD_PHP, !!(ddr_data.pmu_pd_st0 & BIT(PD_PHP)));
+	pmu_set_power_domain(PD_PCIE, !!(ddr_data.pmu_pd_st0 & BIT(PD_PCIE)));
+	pmu_set_power_domain(PD_GMAC, !!(ddr_data.pmu_pd_st0 & BIT(PD_GMAC)));
+
+	pmu_set_power_domain(PD_AUDIO, !!(ddr_data.pmu_pd_st0 & BIT(PD_AUDIO)));
+
+	pmu_set_power_domain(PD_USB, !!(ddr_data.pmu_pd_st0 & BIT(PD_USB)));
+
+	pmu_set_power_domain(PD_RGA31, !!(ddr_data.pmu_pd_st0 & BIT(PD_RGA31)));
+
+	pmu_set_power_domain(PD_VI, !!(ddr_data.pmu_pd_st0 & BIT(PD_VI)));
+	pmu_set_power_domain(PD_ISP1, !!(ddr_data.pmu_pd_st0 & BIT(PD_ISP1)));
+	pmu_set_power_domain(PD_FEC, !!(ddr_data.pmu_pd_st0 & BIT(PD_FEC)));
+
+	pmu_set_power_domain(PD_VOP, !!(ddr_data.pmu_pd_st0 & BIT(PD_VOP)));
+
+	pmu_set_power_domain(PD_VO1, !!(ddr_data.pmu_pd_st0 & BIT(PD_VO1)));
+
+	pmu_set_power_domain(PD_VO0, !!(ddr_data.pmu_pd_st0 & BIT(PD_VO0)));
+
+	pmu_set_power_domain(PD_VDPU, !!(ddr_data.pmu_pd_st0 & BIT(PD_VDPU)));
+	pmu_set_power_domain(PD_AV1, !!(ddr_data.pmu_pd_st0 & BIT(PD_AV1)));
+	pmu_set_power_domain(PD_RGA30, !!(ddr_data.pmu_pd_st0 & BIT(PD_RGA30)));
+
+	pmu_set_power_domain(PD_VCODEC, !!(ddr_data.pmu_pd_st0 & BIT(PD_VCODEC)));
+	pmu_set_power_domain(PD_VENC0, !!(ddr_data.pmu_pd_st0 & BIT(PD_VENC0)));
+	pmu_set_power_domain(PD_VENC1, !!(ddr_data.pmu_pd_st0 & BIT(PD_VENC1)));
+	pmu_set_power_domain(PD_RKVDEC0, !!(ddr_data.pmu_pd_st0 & BIT(PD_RKVDEC0)));
+	pmu_set_power_domain(PD_RKVDEC1, !!(ddr_data.pmu_pd_st0 & BIT(PD_RKVDEC1)));
+
+	pmu_set_power_domain(PD_NPU, !!(ddr_data.pmu_pd_st0 & BIT(PD_NPU)));
+	pmu_set_power_domain(PD_NPUTOP, !!(ddr_data.pmu_pd_st0 & BIT(PD_NPUTOP)));
+	pmu_set_power_domain(PD_NPU2, !!(ddr_data.pmu_pd_st0 & BIT(PD_NPU2)));
+	pmu_set_power_domain(PD_NPU1, !!(ddr_data.pmu_pd_st0 & BIT(PD_NPU1)));
+
+	pmu_set_power_domain(PD_GPU, !!(ddr_data.pmu_pd_st0 & BIT(PD_GPU)));
+
+	for (i = 0; i < 32; i++)
+		pmu_bus_idle_req(i, !!(ddr_data.bus_idle_st0 & BIT(i)));
+
+	pmu_qch_pwr_ctlr(0x10, !!(ddr_data.qch_pwr_st & 0x10));
+	pmu_qch_pwr_ctlr(0x8, !!(ddr_data.qch_pwr_st & 0x8));
+	pmu_qch_pwr_ctlr(0x4, !!(ddr_data.qch_pwr_st & 0x4));
+	pmu_qch_pwr_ctlr(0x2, !!(ddr_data.qch_pwr_st & 0x2));
+	pmu_qch_pwr_ctlr(0x1, !!(ddr_data.qch_pwr_st & 0x1));
+	pmu_qch_pwr_ctlr(0x40, !!(ddr_data.qch_pwr_st & 0x40));
+	pmu_qch_pwr_ctlr(0x20, !!(ddr_data.qch_pwr_st & 0x20));
+
+	if ((ddr_data.pmu_pd_st0 & BIT(PD_CRYPTO)) == 0)
+		pd_crypto_restore();
+
+	if ((ddr_data.pmu_pd_st0 & BIT(PD_PHP)) == 0)
+		pd_php_restore();
+
+	qos_restore();
+}
+
+static int cpus_power_domain_on(uint32_t cpu_id)
+{
+	mmio_write_32(PMU_BASE + PMU2_CPU_AUTO_PWR_CON(cpu_id),
+		      BITS_WITH_WMASK(0, 0x1, core_pm_en));
+	mmio_write_32(PMU_BASE + PMU2_CPU_AUTO_PWR_CON(cpu_id),
+		      BITS_WITH_WMASK(1, 0x1, core_pm_sft_wakeup_en));
+	dsb();
+
+	return 0;
+}
+
+static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
+{
+	uint32_t apm_value = BIT(core_pm_en);
+
+	if (pd_cfg == core_pwr_wfi_int)
+		apm_value |= BIT(core_pm_int_wakeup_en);
+
+	mmio_write_32(PMU_BASE + PMU2_CPU_AUTO_PWR_CON(cpu_id),
+		      BITS_WITH_WMASK(apm_value, 0x3, 0));
+	dsb();
+
+	return 0;
+}
+
+static inline void cpus_pd_req_enter_wfi(void)
+{
+	/* CORTEX_A55_CPUACTLR_EL1 */
+	__asm__ volatile ("msr	DBGPRCR_EL1, xzr\n"
+			  "mrs	x0, S3_0_C15_C2_7\n"
+			  "orr	x0, x0, #0x1\n"
+			  "msr	S3_0_C15_C2_7, x0\n"
+			  "wfi_loop:\n"
+			  "isb\n"
+			  "wfi\n"
+			  "b wfi_loop\n");
+}
+
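+/*
+ * Park the non-boot CPUs: release the big-core resets, wait for the big
+ * cores to reach WFE, enable the per-CPU automatic power-down, publish the
+ * WFI entry hook in SRAM and then wait until the cluster status reports the
+ * non-boot CPUs down.
+ */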
+static void nonboot_cpus_off(void)
+{
+	uint32_t boot_cpu, cpu, tmp;
+	uint32_t exp_st;
+	uint32_t bcore0_rst_msk = 0, bcore1_rst_msk = 0;
+	int wait_cnt;
+
+	bcore0_rst_msk = CRU_BIGCPU02_RST_MSK | CRU_BIGCPU13_RST_MSK;
+	bcore1_rst_msk = CRU_BIGCPU02_RST_MSK | CRU_BIGCPU13_RST_MSK;
+
+	mmio_write_32(BIGCORE0CRU_BASE + 0xa00, BITS_WITH_WMASK(0, bcore0_rst_msk, 0));
+	mmio_write_32(BIGCORE1CRU_BASE + 0xa00, BITS_WITH_WMASK(0, bcore1_rst_msk, 0));
+
+	wait_cnt = NONBOOT_CPUS_OFF_LOOP;
+	exp_st = SYS_GRF_BIG_CPUS_WFE;
+	do {
+		wait_cnt--;
+		tmp = mmio_read_32(SYSGRF_BASE + SYS_GRF_SOC_STATUS(3));
+		tmp &= SYS_GRF_BIG_CPUS_WFE;
+	} while (tmp != exp_st && wait_cnt);
+
+	boot_cpu = plat_my_core_pos();
+
+	/* turn off nonboot cpus */
+	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
+		if (cpu == boot_cpu)
+			continue;
+		cpus_power_domain_off(cpu, core_pwr_wfi);
+	}
+
+	mmio_write_32(SRAM_BASE + 0x08, (uintptr_t)&cpus_pd_req_enter_wfi);
+	mmio_write_32(SRAM_BASE + 0x04, 0xdeadbeaf);
+
+	dsb();
+	isb();
+
+	sev();
+
+	wait_cnt = NONBOOT_CPUS_OFF_LOOP;
+	do {
+		wait_cnt--;
+		tmp = mmio_read_32(PMU_BASE + PMU2_CLUSTER_ST);
+		tmp &= CLUSTER_STS_NONBOOT_CPUS_DWN;
+	} while (tmp != CLUSTER_STS_NONBOOT_CPUS_DWN && wait_cnt);
+
+	if (tmp != CLUSTER_STS_NONBOOT_CPUS_DWN)
+		ERROR("nonboot cpus status(%x) error!\n", tmp);
+}
+
+int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr,
+				 uint64_t entrypoint)
+{
+	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);
+
+	assert(cpu_id < PLATFORM_CORE_COUNT);
+	assert(cpuson_flags[cpu_id] == 0);
+	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
+	cpuson_entry_point[cpu_id] = entrypoint;
+	dsb();
+
+	flush_dcache_range((uintptr_t)cpuson_flags, sizeof(cpuson_flags));
+	flush_dcache_range((uintptr_t)cpuson_entry_point,
+			   sizeof(cpuson_entry_point));
+	dsb();
+	isb();
+
+	cpus_power_domain_on(cpu_id);
+
+	return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_cores_pwr_dm_on_finish(void)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+
+	mmio_write_32(PMU_BASE + PMU2_CPU_AUTO_PWR_CON(cpu_id),
+		      BITS_WITH_WMASK(0, 0xf, 0));
+
+	return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_cores_pwr_dm_off(void)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+
+	cpus_power_domain_off(cpu_id, core_pwr_wfi);
+
+	return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_cores_pwr_dm_suspend(void)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+
+	assert(cpu_id < PLATFORM_CORE_COUNT);
+
+	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
+	cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
+	dsb();
+	flush_dcache_range((uintptr_t)cpuson_flags, sizeof(cpuson_flags));
+	flush_dcache_range((uintptr_t)cpuson_entry_point,
+			   sizeof(cpuson_entry_point));
+	dsb();
+	isb();
+
+	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);
+
+	__asm__ volatile ("msr	DBGPRCR_EL1, xzr\n"
+			  "mrs	x0, S3_0_C15_C2_7\n"
+			  "orr	x0, x0, #0x1\n"
+			  "msr	S3_0_C15_C2_7, x0\n");
+
+	return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_cores_pwr_dm_resume(void)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+
+	mmio_write_32(PMU_BASE + PMU2_CPU_AUTO_PWR_CON(cpu_id),
+		      BITS_WITH_WMASK(0, 0x3, 0));
+
+	dsb();
+
+	return PSCI_E_SUCCESS;
+}
+
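+/*
+ * Save the DDR01/DDR23 GRF channel controls and the PMU1 DDR power soft
+ * controls, then reconfigure them for system suspend.
+ */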
+static void ddr_sleep_config(void)
+{
+	int i;
+
+	if (pmu_power_domain_st(PD_DDR01) == 0) {
+		ddr_data.ddrgrf_chn_con0[0] =
+			mmio_read_32(DDR01GRF_BASE + DDRGRF_CHA_CON(0));
+		ddr_data.ddrgrf_chn_con0[1] =
+			mmio_read_32(DDR01GRF_BASE + DDRGRF_CHB_CON(0));
+		ddr_data.ddrgrf_chn_con1[0] =
+			mmio_read_32(DDR01GRF_BASE + DDRGRF_CHA_CON(1));
+		ddr_data.ddrgrf_chn_con1[1] =
+			mmio_read_32(DDR01GRF_BASE + DDRGRF_CHB_CON(1));
+		ddr_data.ddrgrf_chn_con2[0] =
+			mmio_read_32(DDR01GRF_BASE + DDRGRF_CHA_CON(2));
+		ddr_data.ddrgrf_chn_con2[1] =
+			mmio_read_32(DDR01GRF_BASE + DDRGRF_CHB_CON(2));
+
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHA_CON(2), 0x20002000);
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHB_CON(2), 0x20002000);
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHA_CON(2), 0x08000000);
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHB_CON(2), 0x08000000);
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHA_CON(0), 0x00200020);
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHB_CON(0), 0x00200020);
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHA_CON(1), 0x00400040);
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHB_CON(1), 0x00400040);
+	}
+
+	if (pmu_power_domain_st(PD_DDR23) == 0) {
+		ddr_data.ddrgrf_chn_con0[2] =
+			mmio_read_32(DDR23GRF_BASE + DDRGRF_CHA_CON(0));
+		ddr_data.ddrgrf_chn_con0[3] =
+			mmio_read_32(DDR23GRF_BASE + DDRGRF_CHB_CON(0));
+		ddr_data.ddrgrf_chn_con1[2] =
+			mmio_read_32(DDR23GRF_BASE + DDRGRF_CHA_CON(1));
+		ddr_data.ddrgrf_chn_con1[3] =
+			mmio_read_32(DDR23GRF_BASE + DDRGRF_CHB_CON(1));
+		ddr_data.ddrgrf_chn_con2[2] =
+			mmio_read_32(DDR23GRF_BASE + DDRGRF_CHA_CON(2));
+		ddr_data.ddrgrf_chn_con2[3] =
+			mmio_read_32(DDR23GRF_BASE + DDRGRF_CHB_CON(2));
+
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHA_CON(2), 0x20002000);
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHB_CON(2), 0x20002000);
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHA_CON(2), 0x08000000);
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHB_CON(2), 0x08000000);
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHA_CON(0), 0x00200020);
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHB_CON(0), 0x00200020);
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHA_CON(1), 0x00400040);
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHB_CON(1), 0x00400040);
+	}
+
+	for (i = 0; i < DDR_CHN_CNT; i++) {
+		ddr_data.pmu1_ddr_pwr_sft_con[i] =
+			mmio_read_32(PMU_BASE + PMU1_DDR_PWR_SFTCON(i));
+		mmio_write_32(PMU_BASE + PMU1_DDR_PWR_SFTCON(i), 0x0fff0900);
+	}
+}
+
+static void ddr_sleep_config_restore(void)
+{
+	int i;
+
+	for (i = 0; i < DDR_CHN_CNT; i++) {
+		mmio_write_32(PMU_BASE + PMU1_DDR_PWR_SFTCON(i),
+			      0x0fff0000 | ddr_data.pmu1_ddr_pwr_sft_con[i]);
+	}
+
+	if (pmu_power_domain_st(PD_DDR01) == 0) {
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHA_CON(1),
+			      0x00400000 | ddr_data.ddrgrf_chn_con1[0]);
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHB_CON(1),
+			      0x00400000 | ddr_data.ddrgrf_chn_con1[1]);
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHA_CON(0),
+			      0x00200000 | ddr_data.ddrgrf_chn_con0[0]);
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHB_CON(0),
+			      0x00200000 | ddr_data.ddrgrf_chn_con0[1]);
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHA_CON(2),
+			      0x28000000 | ddr_data.ddrgrf_chn_con2[0]);
+		mmio_write_32(DDR01GRF_BASE + DDRGRF_CHB_CON(2),
+			      0x28000000 | ddr_data.ddrgrf_chn_con2[1]);
+	}
+
+	if (pmu_power_domain_st(PD_DDR23) == 0) {
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHA_CON(1),
+			      0x00400000 | ddr_data.ddrgrf_chn_con1[2]);
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHB_CON(1),
+			      0x00400000 | ddr_data.ddrgrf_chn_con1[3]);
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHA_CON(0),
+			      0x00200000 | ddr_data.ddrgrf_chn_con0[2]);
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHB_CON(0),
+			      0x00200000 | ddr_data.ddrgrf_chn_con0[3]);
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHA_CON(2),
+			      0x28000000 | ddr_data.ddrgrf_chn_con2[2]);
+		mmio_write_32(DDR23GRF_BASE + DDRGRF_CHB_CON(2),
+			      0x28000000 | ddr_data.ddrgrf_chn_con2[3]);
+	}
+}
+
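+/*
+ * Save the PMU/GRF/IOMUX state and program the PMU power mode, wakeup
+ * sources, PLL power-down, bus idle and power/voltage gate controls used
+ * while the system is suspended.
+ */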
+static void pmu_sleep_config(void)
+{
+	uint32_t pmu1_pwr_con, pmu1_wkup_int_con, pmu1_cru_pwr_con;
+	uint32_t pmu1_ddr_pwr_con, pmu1_pll_pd_con[2] = {0};
+	uint32_t pmu2_dsu_pwr_con, pmu2_core_pwr_con, pmu2_clst_idle_con;
+	uint32_t pmu2_bus_idle_con[3] = {0}, pmu2_pwr_gate_con[3] = {0};
+	uint32_t pmu2_vol_gate_con[3] = {0}, pmu2_qch_pwr_con = 0;
+	int i;
+
+	ddr_data.pmu1grf_soc_con7 = mmio_read_32(PMU1GRF_BASE + PMU1_GRF_SOC_CON(7));
+	ddr_data.pmu1grf_soc_con8 = mmio_read_32(PMU1GRF_BASE + PMU1_GRF_SOC_CON(8));
+	ddr_data.pmu1grf_soc_con9 = mmio_read_32(PMU1GRF_BASE + PMU1_GRF_SOC_CON(9));
+	ddr_data.pmu1sgrf_soc_con14 = mmio_read_32(PMU1SGRF_BASE + PMU1_SGRF_SOC_CON(14));
+	ddr_data.pmu0sgrf_soc_con1 = mmio_read_32(PMU0SGRF_BASE + PMU0_SGRF_SOC_CON(1));
+	ddr_data.pmu0grf_soc_con1 = mmio_read_32(PMU0GRF_BASE + PMU0_GRF_SOC_CON(1));
+
+	ddr_data.pmu2_vol_gate_con[0] = mmio_read_32(PMU_BASE + PMU2_VOL_GATE_CON(0));
+	ddr_data.pmu2_vol_gate_con[1] = mmio_read_32(PMU_BASE + PMU2_VOL_GATE_CON(1));
+	ddr_data.pmu2_vol_gate_con[2] = mmio_read_32(PMU_BASE + PMU2_VOL_GATE_CON(2));
+
+	ddr_data.pmu2_submem_gate_sft_con0 =
+		mmio_read_32(PMU_BASE + PMU2_MEMPWR_MD_GATE_SFTCON(0));
+
+	/* save pmic_sleep iomux gpio0_a4 */
+	ddr_data.gpio0a_iomux_l = mmio_read_32(PMU0IOC_BASE + 0);
+	ddr_data.gpio0a_iomux_h = mmio_read_32(PMU0IOC_BASE + 4);
+	ddr_data.pmu0grf_soc_con3 = mmio_read_32(PMU0GRF_BASE + PMU0_GRF_SOC_CON(3));
+
+	/* PMU1 repair disable */
+	mmio_write_32(PMU0GRF_BASE + PMU0_GRF_SOC_CON(0), 0x00010000);
+
+	/* set pmic_sleep iomux */
+	mmio_write_32(PMU0IOC_BASE + 0,
+		      BITS_WITH_WMASK(1, 0xf, 8) |
+		      BITS_WITH_WMASK(1, 0xfu, 12));
+
+	/* set tsadc_shut_m0 pin iomux to gpio */
+	mmio_write_32(PMU0IOC_BASE + 0,
+		      BITS_WITH_WMASK(0, 0xf, 4));
+
+	/* set spi2_cs0/1 pin iomux to gpio */
+	mmio_write_32(PMU0IOC_BASE + 8,
+		      BITS_WITH_WMASK(0, 0xff, 0));
+
+	/* sleep 1~2 src select */
+	mmio_write_32(PMU0GRF_BASE + PMU0_GRF_SOC_CON(3),
+		      BITS_WITH_WMASK(0x8, 0xf, 0) |
+		      BITS_WITH_WMASK(0x8, 0xf, 4) |
+		      BITS_WITH_WMASK(0x0, 0x3, 8));
+
+	pmu1_wkup_int_con = BIT(WAKEUP_GPIO0_INT_EN) |
+			    BIT(WAKEUP_CPU0_INT_EN);
+
+	pmu1_pwr_con = BIT(powermode_en);
+
+	pmu1_cru_pwr_con =
+		BIT(alive_osc_mode_en) |
+		BIT(power_off_en) |
+		BIT(pd_clk_src_gate_en);
+
+	pmu1_ddr_pwr_con = 0;
+
+	pmu2_dsu_pwr_con =
+		BIT(DSU_PWRDN_EN) |
+		BIT(DSU_PWROFF_EN);
+
+	pmu2_core_pwr_con = BIT(CORE_PWRDN_EN);
+
+	pmu2_clst_idle_con =
+		BIT(IDLE_REQ_BIGCORE0_EN) |
+		BIT(IDLE_REQ_BIGCORE1_EN) |
+		BIT(IDLE_REQ_DSU_EN) |
+		BIT(IDLE_REQ_LITDSU_EN) |
+		BIT(IDLE_REQ_ADB400_CORE_QCH_EN);
+
+	pmu1_pll_pd_con[0] =
+		BIT(B0PLL_PD_EN) |
+		BIT(B1PLL_PD_EN) |
+		BIT(LPLL_PD_EN) |
+		BIT(V0PLL_PD_EN) |
+		BIT(AUPLL_PD_EN) |
+		BIT(GPLL_PD_EN) |
+		BIT(CPLL_PD_EN) |
+		BIT(NPLL_PD_EN);
+
+	pmu1_pll_pd_con[1] =
+		BIT(PPLL_PD_EN) |
+		BIT(SPLL_PD_EN);
+
+	pmu2_bus_idle_con[0] = 0;
+
+	pmu2_bus_idle_con[1] =
+		BIT(BUS_ID_SECURE - 16) |
+		BIT(BUS_ID_SECURE_CENTER_CHANNEL - 16) |
+		BIT(BUS_ID_CENTER_CHANNEL - 16);
+
+	pmu2_bus_idle_con[2] =
+		BIT(BUS_ID_MSCH - 32) |
+		BIT(BUS_ID_BUS - 32) |
+		BIT(BUS_ID_TOP - 32);
+
+	pmu2_pwr_gate_con[0] = 0;
+	pmu2_pwr_gate_con[1] = BIT(PD_SECURE - 16);
+	pmu2_pwr_gate_con[2] = 0;
+
+	pmu2_qch_pwr_con = 0;
+
+	pmu2_vol_gate_con[0] = 0x7;
+	pmu2_vol_gate_con[2] = 0;
+
+	mmio_write_32(PMU_BASE + PMU2_CORE_AUTO_PWR_CON(0), 0x00030000);
+	mmio_write_32(PMU_BASE + PMU2_CORE_AUTO_PWR_CON(1), 0x00030000);
+	mmio_write_32(PMU_BASE + PMU2_CORE_PWR_CON(0),
+		      WITH_16BITS_WMSK(pmu2_core_pwr_con));
+	mmio_write_32(PMU_BASE + PMU2_CORE_PWR_CON(1),
+		      WITH_16BITS_WMSK(pmu2_core_pwr_con));
+	mmio_write_32(PMU_BASE + PMU2_CLUSTER_IDLE_CON,
+		      WITH_16BITS_WMSK(pmu2_clst_idle_con));
+	mmio_write_32(PMU_BASE + PMU2_DSU_AUTO_PWR_CON, 0x00030000);
+	mmio_write_32(PMU_BASE + PMU2_DSU_PWR_CON,
+		      WITH_16BITS_WMSK(pmu2_dsu_pwr_con));
+
+	mmio_write_32(PMU_BASE + PMU1_OSC_STABLE_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU1_STABLE_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU1_WAKEUP_RST_CLR_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU1_PLL_LOCK_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU1_PWM_SWITCH_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU2_CORE0_STABLE_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU2_CORE0_PWRUP_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU2_CORE0_PWRDN_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU2_CORE1_STABLE_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU2_CORE1_PWRUP_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU2_CORE1_PWRDN_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU2_DSU_STABLE_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU2_DSU_PWRUP_CNT_THRESH, 24000);
+	mmio_write_32(PMU_BASE + PMU2_DSU_PWRDN_CNT_THRESH, 24000);
+
+	/* Config pmu power mode and pmu wakeup source */
+	mmio_write_32(PMU_BASE + PMU1_INT_MASK_CON,
+		      BITS_WITH_WMASK(1, 0x1, 0));
+
+	/* pmu1_pwr_con */
+	mmio_write_32(PMU_BASE + PMU1_PWR_CON,
+		      WITH_16BITS_WMSK(pmu1_pwr_con));
+
+	/* cru_pwr_con */
+	mmio_write_32(PMU_BASE + PMU1_CRU_PWR_CON,
+		      WITH_16BITS_WMSK(pmu1_cru_pwr_con));
+
+	/* wakeup source */
+	mmio_write_32(PMU_BASE + PMU1_WAKEUP_INT_CON, pmu1_wkup_int_con);
+
+	/* ddr pwr con */
+	for (i = 0; i < DDR_CHN_CNT; i++) {
+		mmio_write_32(PMU_BASE + PMU1_DDR_PWR_CON(i),
+			      WITH_16BITS_WMSK(pmu1_ddr_pwr_con));
+		pmu2_bus_idle_con[1] |=
+			BIT(BUS_ID_MSCH0 - 16 + i);
+	}
+
+	/* pll_pd */
+	mmio_write_32(PMU_BASE + PMU1_PLLPD_CON(0),
+		      WITH_16BITS_WMSK(pmu1_pll_pd_con[0]));
+	mmio_write_32(PMU_BASE + PMU1_PLLPD_CON(1),
+		      WITH_16BITS_WMSK(pmu1_pll_pd_con[1]));
+
+	/* bypass cpu1~7 */
+	mmio_write_32(PMU_BASE + PMU2_PWR_CON1, 0x00ff00fe);
+
+	/* bus idle */
+	mmio_write_32(PMU_BASE + PMU2_BUS_IDLE_CON(0),
+		      WITH_16BITS_WMSK(pmu2_bus_idle_con[0]));
+	mmio_write_32(PMU_BASE + PMU2_BUS_IDLE_CON(1),
+		      WITH_16BITS_WMSK(pmu2_bus_idle_con[1]));
+	mmio_write_32(PMU_BASE + PMU2_BUS_IDLE_CON(2),
+		      WITH_16BITS_WMSK(pmu2_bus_idle_con[2]));
+	mmio_write_32(PMU_BASE + PMU2_BUS_IDLE_CON(2),
+		      0xf000f000);
+	/* power gate */
+	mmio_write_32(PMU_BASE + PMU2_PWR_GATE_CON(0),
+		      WITH_16BITS_WMSK(pmu2_pwr_gate_con[0]));
+	mmio_write_32(PMU_BASE + PMU2_PWR_GATE_CON(1),
+		      WITH_16BITS_WMSK(pmu2_pwr_gate_con[1]));
+	mmio_write_32(PMU_BASE + PMU2_PWR_GATE_CON(2),
+		      WITH_16BITS_WMSK(pmu2_pwr_gate_con[2]));
+	/* vol gate */
+	mmio_write_32(PMU_BASE + PMU2_VOL_GATE_CON(0),
+		      BITS_WITH_WMASK(pmu2_vol_gate_con[0], 0x7, 0));
+	mmio_write_32(PMU_BASE + PMU2_VOL_GATE_CON(1), 0);
+	mmio_write_32(PMU_BASE + PMU2_VOL_GATE_CON(2),
+		      BITS_WITH_WMASK(pmu2_vol_gate_con[2], 0x3, 0));
+	/* qch */
+	mmio_write_32(PMU_BASE + PMU2_QCHANNEL_PWR_CON,
+		      BITS_WITH_WMASK(pmu2_qch_pwr_con, 0x7f, 0));
+
+	mmio_write_32(PMU_BASE + PMU2_MEMPWR_MD_GATE_SFTCON(0),
+		      0x000f000f);
+}
+
+static void pmu_sleep_restore(void)
+{
+	mmio_write_32(PMU1GRF_BASE + PMU1_GRF_SOC_CON(7),
+		      WITH_16BITS_WMSK(ddr_data.pmu1grf_soc_con7));
+	mmio_write_32(PMU1GRF_BASE + PMU1_GRF_SOC_CON(8),
+		      WITH_16BITS_WMSK(ddr_data.pmu1grf_soc_con8));
+	mmio_write_32(PMU1GRF_BASE + PMU1_GRF_SOC_CON(9),
+		      WITH_16BITS_WMSK(ddr_data.pmu1grf_soc_con9));
+	mmio_write_32(PMU1SGRF_BASE + PMU1_SGRF_SOC_CON(14),
+		      WITH_16BITS_WMSK(ddr_data.pmu1sgrf_soc_con14));
+
+	mmio_write_32(PMU0SGRF_BASE + PMU0_SGRF_SOC_CON(1),
+		      WITH_16BITS_WMSK(ddr_data.pmu0sgrf_soc_con1));
+	mmio_write_32(PMU0GRF_BASE + PMU0_GRF_SOC_CON(1),
+		      WITH_16BITS_WMSK(ddr_data.pmu0grf_soc_con1));
+
+	mmio_write_32(PMU_BASE + PMU2_CORE_PWR_CON(0), 0xffff0000);
+	mmio_write_32(PMU_BASE + PMU2_CORE_PWR_CON(1), 0xffff0000);
+	mmio_write_32(PMU_BASE + PMU2_CLUSTER_IDLE_CON, 0xffff0000);
+	mmio_write_32(PMU_BASE + PMU2_DSU_PWR_CON, 0xffff0000);
+	mmio_write_32(PMU_BASE + PMU2_PWR_CON1, 0xffff0000);
+
+	/*
+	 * PMU1_WAKEUP_INT_CON must be cleared, otherwise the wakeup sources
+	 * configured in it will wake up the CPUs while they are in the
+	 * cpu_auto_pd state.
+	 */
+	mmio_write_32(PMU_BASE + PMU1_WAKEUP_INT_CON, 0);
+	mmio_write_32(PMU_BASE + PMU1_PWR_CON, 0xffff0000);
+	mmio_write_32(PMU_BASE + PMU1_INT_MASK_CON, 0x00010000);
+	mmio_write_32(PMU_BASE + PMU0_WAKEUP_INT_CON, 0x00010000);
+	mmio_write_32(PMU_BASE + PMU0_PWR_CON, 0xffff0000);
+
+	mmio_write_32(PMU_BASE + PMU2_VOL_GATE_CON(0),
+		      WITH_16BITS_WMSK(ddr_data.pmu2_vol_gate_con[0]));
+	mmio_write_32(PMU_BASE + PMU2_VOL_GATE_CON(1),
+		      WITH_16BITS_WMSK(ddr_data.pmu2_vol_gate_con[1]));
+	mmio_write_32(PMU_BASE + PMU2_VOL_GATE_CON(2),
+		      WITH_16BITS_WMSK(ddr_data.pmu2_vol_gate_con[2]));
+
+	mmio_write_32(PMU_BASE + PMU2_MEMPWR_MD_GATE_SFTCON(0),
+		      WITH_16BITS_WMSK(ddr_data.pmu2_submem_gate_sft_con0));
+
+	mmio_write_32(PMU0GRF_BASE + PMU0_GRF_SOC_CON(3),
+		      WITH_16BITS_WMSK(ddr_data.pmu0grf_soc_con3));
+	mmio_write_32(PMU1GRF_BASE + PMU1_GRF_SOC_CON(2),
+		      WITH_16BITS_WMSK(ddr_data.pmu1grf_soc_con2));
+
+	mmio_write_32(PMU0IOC_BASE + 0x4,
+		      WITH_16BITS_WMSK(ddr_data.gpio0a_iomux_h));
+	mmio_write_32(PMU0IOC_BASE + 0,
+		      WITH_16BITS_WMSK(ddr_data.gpio0a_iomux_l));
+}
+
+static void soc_sleep_config(void)
+{
+	ddr_data.gpio0b_iomux_l = mmio_read_32(PMU0IOC_BASE + 0x8);
+
+	pmu_sleep_config();
+	ddr_sleep_config();
+}
+
+static void soc_sleep_restore(void)
+{
+	ddr_sleep_config_restore();
+	pmu_sleep_restore();
+
+	mmio_write_32(PMU0IOC_BASE + 0x8, WITH_16BITS_WMSK(ddr_data.gpio0b_iomux_l));
+}
+
+static void pm_pll_suspend(void)
+{
+	ddr_data.cru_mode_con = mmio_read_32(CRU_BASE + 0x280);
+	ddr_data.busscru_mode_con = mmio_read_32(BUSSCRU_BASE + 0x280);
+	ddr_data.pmu2_bisr_con0 = mmio_read_32(PMU_BASE + PMU2_BISR_CON(0));
+	ddr_data.cpll_con0 = mmio_read_32(CRU_BASE + CRU_PLLS_CON(2, 0));
+	ddr_data.pmu1cru_clksel_con1 = mmio_read_32(PMU1CRU_BASE + CRU_CLKSEL_CON(1));
+
+	/* disable bisr_init */
+	mmio_write_32(PMU_BASE + PMU2_BISR_CON(0), BITS_WITH_WMASK(0, 0x1, 0));
+	/* cpll bypass */
+	mmio_write_32(CRU_BASE + CRU_PLLS_CON(2, 0), BITS_WITH_WMASK(1u, 1u, 15));
+}
+
+static void pm_pll_restore(void)
+{
+	pm_pll_wait_lock(CRU_BASE + CRU_PLLS_CON(2, 0));
+
+	mmio_write_32(CRU_BASE + 0x280, WITH_16BITS_WMSK(ddr_data.cru_mode_con));
+	mmio_write_32(BUSSCRU_BASE + 0x280, WITH_16BITS_WMSK(ddr_data.busscru_mode_con));
+	mmio_write_32(CRU_BASE + CRU_PLLS_CON(2, 0), WITH_16BITS_WMSK(ddr_data.cpll_con0));
+	dsb();
+	isb();
+	mmio_write_32(PMU_BASE + PMU2_BISR_CON(0), WITH_16BITS_WMSK(ddr_data.pmu2_bisr_con0));
+}
+
+int rockchip_soc_sys_pwr_dm_suspend(void)
+{
+	clk_gate_con_save();
+	clk_gate_con_disable();
+
+	psram_sleep_cfg->pm_flag &= ~PM_WARM_BOOT_BIT;
+
+	pmu_power_domains_suspend();
+	soc_sleep_config();
+	dsu_core_save();
+	pm_pll_suspend();
+
+	return 0;
+}
+
+int rockchip_soc_sys_pwr_dm_resume(void)
+{
+	pm_pll_restore();
+	dsu_core_restore();
+	soc_sleep_restore();
+	pmu_power_domains_resume();
+	plat_rockchip_gic_cpuif_enable();
+
+	psram_sleep_cfg->pm_flag |= PM_WARM_BOOT_BIT;
+
+	clk_gate_con_restore();
+
+	return 0;
+}
+
+void __dead2 rockchip_soc_cores_pd_pwr_dn_wfi(const
+					psci_power_state_t *target_state)
+{
+	psci_power_down_wfi();
+}
+
+void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
+{
+	cpus_pd_req_enter_wfi();
+	psci_power_down_wfi();
+}
+
+void __dead2 rockchip_soc_soft_reset(void)
+{
+	/* pll slow mode */
+	mmio_write_32(CRU_BASE + 0x280, 0x03ff0000);
+	mmio_write_32(BIGCORE0CRU_BASE + 0x280, 0x00030000);
+	mmio_write_32(BIGCORE0CRU_BASE + 0x300, 0x60000000);
+	mmio_write_32(BIGCORE0CRU_BASE + 0x304, 0x00600000);
+	mmio_write_32(BIGCORE1CRU_BASE + 0x280, 0x00030000);
+	mmio_write_32(BIGCORE1CRU_BASE + 0x300, 0x60000000);
+	mmio_write_32(BIGCORE1CRU_BASE + 0x304, 0x00600000);
+	mmio_write_32(DSUCRU_BASE + 0x280, 0x00030000);
+	mmio_write_32(DSUCRU_BASE + 0x318, 0x30600000);
+	mmio_write_32(DSUCRU_BASE + 0x31c, 0x30600000);
+	mmio_write_32(DSUCRU_BASE + 0x304, 0x00010000);
+	mmio_write_32(BUSSCRU_BASE + 0x280, 0x00030000);
+	dsb();
+	isb();
+
+	mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, GLB_SRST_FST_CFG_VAL);
+
+	/*
+	 * The HW may need some time to reset the system, so do not
+	 * expect the core to execute any further code; just wait here.
+	 */
+	psci_power_down_wfi();
+}
+
+void __dead2 rockchip_soc_system_off(void)
+{
+	/* set pmic_sleep pin(gpio0_a2) to gpio mode */
+	mmio_write_32(PMU0IOC_BASE + 0, BITS_WITH_WMASK(0, 0xf, 8));
+
+	/* config output */
+	mmio_write_32(GPIO0_BASE + GPIO_SWPORT_DDR_L,
+		      BITS_WITH_WMASK(1, 0x1, 2));
+
+	/* config output high level */
+	mmio_write_32(GPIO0_BASE + GPIO_SWPORT_DR_L,
+		      BITS_WITH_WMASK(1, 0x1, 2));
+	dsb();
+
+	/*
+	 * The HW may need some time to reset the system, so do not
+	 * expect the core to execute any further code; just wait here.
+	 */
+	psci_power_down_wfi();
+}
+
+static void rockchip_pmu_pd_init(void)
+{
+	mmio_write_32(PMU_BASE + PMU2_BISR_CON(1), 0xffffffff);
+	mmio_write_32(PMU_BASE + PMU2_BISR_CON(2), 0xffffffff);
+	mmio_write_32(PMU_BASE + PMU2_BISR_CON(3), 0xffffffff);
+
+	pmu_set_power_domain(PD_PHP, pmu_pd_on);
+	pmu_set_power_domain(PD_PCIE, pmu_pd_on);
+	pmu_set_power_domain(PD_GMAC, pmu_pd_on);
+	pmu_set_power_domain(PD_SECURE, pmu_pd_on);
+	pmu_set_power_domain(PD_VOP, pmu_pd_on);
+	pmu_set_power_domain(PD_VO0, pmu_pd_on);
+	pmu_set_power_domain(PD_VO1, pmu_pd_on);
+}
+
+#define PLL_LOCKED_TIMEOUT 600000U
+
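+/* Wait for a PLL to lock; does nothing if the PLL is powered down. */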
+void pm_pll_wait_lock(uint32_t pll_base)
+{
+	int delay = PLL_LOCKED_TIMEOUT;
+
+	if ((mmio_read_32(pll_base + CRU_PLL_CON(1)) & CRU_PLLCON1_PWRDOWN) != 0)
+		return;
+
+	while (delay-- >= 0) {
+		if (mmio_read_32(pll_base + CRU_PLL_CON(6)) &
+		    CRU_PLLCON6_LOCK_STATUS)
+			break;
+		udelay(1);
+	}
+
+	if (delay <= 0)
+		ERROR("Timeout waiting for pll(0x%x) to lock\n", pll_base);
+}
+
+void rockchip_plat_mmu_el3(void)
+{
+	/* Nothing to do */
+}
+
+void plat_rockchip_pmu_init(void)
+{
+	int cpu;
+
+	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
+		cpuson_flags[cpu] = 0;
+
+	psram_sleep_cfg->sp = PSRAM_SP_TOP;
+	psram_sleep_cfg->ddr_func = (uint64_t)ddr_resume;
+	psram_sleep_cfg->ddr_data = 0;
+	psram_sleep_cfg->ddr_flag = 0;
+	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;
+	psram_sleep_cfg->pm_flag = PM_WARM_BOOT_BIT;
+
+	nonboot_cpus_off();
+
+	/*
+	 * When an idle operation is performed, the corresponding clocks
+	 * can be ungated or gated automatically.
+	 */
+	mmio_write_32(PMU_BASE + PMU2_BIU_AUTO_CON(0), 0xffffffff);
+	mmio_write_32(PMU_BASE + PMU2_BIU_AUTO_CON(1), 0xffffffff);
+	mmio_write_32(PMU_BASE + PMU2_BIU_AUTO_CON(2), 0x00070007);
+
+	rockchip_pmu_pd_init();
+
+	/*
+	 * grf_con_pmic_sleep_sel: PMIC sleep function selection
+	 * 1'b0: from the reset pulse generator, can reset the external PMIC
+	 * 1'b1: from the PMU block, only supports the sleep function of the
+	 *	 external PMIC
+	 */
+	mmio_write_32(PMU0GRF_BASE + PMU0_GRF_SOC_CON(3), 0x03ff0000);
+
+	/* pmusram remap to 0xffff0000 */
+	mmio_write_32(PMU0SGRF_BASE + PMU0_SGRF_SOC_CON(2), 0x00030001);
+
+	pm_reg_rgns_init();
+}
diff --git a/plat/rockchip/rk3588/drivers/pmu/pmu.h b/plat/rockchip/rk3588/drivers/pmu/pmu.h
new file mode 100644
index 0000000..7d8288c
--- /dev/null
+++ b/plat/rockchip/rk3588/drivers/pmu/pmu.h
@@ -0,0 +1,589 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMU_H__
+#define __PMU_H__
+
+#include <lib/mmio.h>
+
+#define PMU0_PWR_CON			0x0000
+#define PMU0_WAKEUP_INT_CON		0x0008
+#define PMU0_WAKEUP_INT_ST		0x000c
+#define PMU0_PMIC_STABLE_CNT_THRES	0x0010
+#define PMU0_WAKEUP_RST_CLR_CNT_THRES	0x0014
+#define PMU0_OSC_STABLE_CNT_THRES	0x0018
+#define PMU0_PWR_CHAIN_STABLE_CON	0x001c
+#define PMU0_DDR_RET_CON(i)		(0x0020 + (i) * 4)
+#define PMU0_INFO_TX_CON		0x0030
+
+#define PMU1_VERSION_ID			0x4000
+#define PMU1_PWR_CON			0x4004
+#define PMU1_PWR_FSM			0x4008
+#define PMU1_INT_MASK_CON		0x400c
+#define PMU1_WAKEUP_INT_CON		0x4010
+#define PMU1_WAKEUP_INT_ST		0x4014
+#define PMU1_WAKEUP_EDGE_CON		0x4018
+#define PMU1_WAKEUP_EDGE_ST		0x401c
+#define PMU1_DDR_PWR_CON(i)		(0x4020 + (i) * 4)
+#define PMU1_DDR_PWR_SFTCON(i)		(0x4030 + (i) * 4)
+#define PMU1_DDR_PWR_FSM		0x4040
+#define PMU1_DDR_PWR_ST			0x4044
+#define PMU1_CRU_PWR_CON		0x4050
+#define PMU1_CRU_PWR_SFTCON		0x4054
+#define PMU1_CRU_PWR_FSM		0x4058
+#define PMU1_PLLPD_CON(i)		(0x4060 + (i) * 4)
+#define PMU1_PLLPD_SFTCON(i)		(0x4068 + (i) * 4)
+#define PMU1_STABLE_CNT_THRESH		0x4080
+#define PMU1_OSC_STABLE_CNT_THRESH	0x4084
+#define PMU1_WAKEUP_RST_CLR_CNT_THRESH	0x4088
+#define PMU1_PLL_LOCK_CNT_THRESH	0x408c
+#define PMU1_WAKEUP_TIMEOUT_THRESH	0x4094
+#define PMU1_PWM_SWITCH_CNT_THRESH	0x4098
+#define PMU1_SYS_REG(i)			(0x4100 + (i) * 4)
+
+#define PMU2_PWR_CON1			0x8000
+#define PMU2_DSU_PWR_CON		0x8004
+#define PMU2_DSU_PWR_SFTCON		0x8008
+#define PMU2_DSU_AUTO_PWR_CON		0x800c
+#define PMU2_CPU_AUTO_PWR_CON(i)	(0x8010 + (i) * 4)
+#define PMU2_CPU_PWR_SFTCON(i)		(0x8030 + (i) * 4)
+#define PMU2_CORE_PWR_CON(i)		(0x8050 + (i) * 4)
+#define PMU2_CORE_PWR_SFTCON(i)		(0x8058 + (i) * 4)
+#define PMU2_CORE_AUTO_PWR_CON(i)	(0x8060 + (i) * 4)
+#define PMU2_CLUSTER_NOC_AUTO_CON	0x8068
+#define PMU2_CLUSTER_DBG_PWR_CON	0x806c
+#define PMU2_CLUSTER_IDLE_CON		0x8070
+#define PMU2_CLUSTER_IDLE_SFTCON	0x8074
+#define PMU2_CLUSTER_IDLE_ACK		0x8078
+#define PMU2_CLUSTER_IDLE_ST		0x807c
+#define PMU2_CLUSTER_ST			0x8080
+#define PMU2_SCU_PWR_FSM_STATUS(i)	(0x8084 + (i) * 4)
+#define PMU2_CORE_PCHANNEL_STATUS(i)	(0x808c + (i) * 4)
+#define PMU2_CPU_PWR_CHAIN_STABLE_CON	0x8098
+#define PMU2_CLUSTER_MEMPWR_GATE_SFTCON	0x809c
+#define PMU2_DSU_STABLE_CNT_THRESH	0x80b0
+#define PMU2_DSU_PWRUP_CNT_THRESH	0x80b4
+#define PMU2_DSU_PWRDN_CNT_THRESH	0x80b8
+#define PMU2_CORE0_STABLE_CNT_THRESH	0x80bc
+#define PMU2_CORE0_PWRUP_CNT_THRESH	0x80c0
+#define PMU2_CORE0_PWRDN_CNT_THRESH	0x80c4
+#define PMU2_CORE1_STABLE_CNT_THRESH	0x80c8
+#define PMU2_CORE1_PWRUP_CNT_THRESH	0x80cc
+#define PMU2_CORE1_PWRDN_CNT_THRESH	0x80d0
+#define PMU2_DBG_RST_CNT_THRESH(i)	(0x80d4 + (i) * 4)
+#define PMU2_BUS_IDLE_CON(i)		(0x8100 + (i) * 4)
+#define PMU2_BUS_IDLE_SFTCON(i)		(0x810c + (i) * 4)
+#define PMU2_BUS_IDLE_ACK(i)		(0x8118 + (i) * 4)
+#define PMU2_BUS_IDLE_ST(i)		(0x8120 + (i) * 4)
+#define PMU2_BIU_AUTO_CON(i)		(0x8128 + (i) * 4)
+#define PMU2_PWR_GATE_CON(i)		(0x8140 + (i) * 4)
+#define PMU2_PWR_GATE_SFTCON(i)		(0x814c + (i) * 4)
+#define PMU2_VOL_GATE_CON(i)		(0x8158 + (i) * 4)
+#define PMU2_PWR_UP_CHAIN_STABLE_CON(i)	(0x8164 + (i) * 4)
+#define PMU2_PWR_DWN_CHAIN_STABLE_CON(i) (0x8170 + (i) * 4)
+#define PMU2_PWR_STABLE_CHAIN_CNT_THRES	0x817c
+#define PMU2_PWR_GATE_ST(i)		(0x8180 + (i) * 4)
+#define PMU2_PWR_GATE_FSM		0x8188
+#define PMU2_VOL_GATE_FAST_CON		0x818c
+#define PMU2_GPU_PWRUP_CNT		0x8190
+#define PMU2_GPU_PWRDN_CNT		0x8194
+#define PMU2_NPU_PWRUP_CNT		0x8198
+#define PMU2_NPU_PWRDN_CNT		0x819c
+#define PMU2_MEMPWR_GATE_SFTCON(i)	(0x81a0 + (i) * 4)
+#define PMU2_MEMPWR_MD_GATE_SFTCON(i)	(0x81b0 + (i) * 4)
+#define PMU2_MEMPWR_MD_GATE_STATUS	0x81bc
+#define PMU2_SUBMEM_PWR_ACK_BYPASS(i)	(0x81c0 + (i) * 4)
+#define PMU2_QCHANNEL_PWR_CON		0x81d0
+#define PMU2_QCHANNEL_PWR_SFTCON	0x81d4
+#define PMU2_QCHANNEL_STATUS		0x81d8
+#define PMU2_DEBUG_INFO_SEL		0x81e0
+#define PMU2_VOP_SUBPD_STATE		0x81e4
+#define PMU2_PWR_CHAIN0_ST(i)		(0x81e8 + (i) * 4)
+#define PMU2_PWR_CHAIN1_ST(i)		(0x81f0 + (i) * 4)
+#define PMU2_PWR_MEM_ST(i)		(0x81f8 + (i) * 4)
+#define PMU2_BISR_CON(i)		(0x8200 + (i) * 4)
+#define PMU2_BISR_STATUS(i)		(0x8280 + (i) * 4)
+
+#define PMU2_QCH_PWR_MSK		0x7f
+
+#define PD_CTR_LOOP			500
+#define PD_CHECK_LOOP			500
+#define WFEI_CHECK_LOOP			500
+#define BUS_IDLE_LOOP			1000
+#define QCH_PWR_LOOP			5000
+
+/* PMU1SCRU */
+#define PMU1SCRU_GATE_CON(i)		(0x800 + (i) * 4)
+
+/* PMU_GRF */
+#define PMU0_GRF_SOC_CON(i)		((i) * 4)
+#define PMU0_GRF_OS_REGS(i)		(0x80 + ((i) - 8) * 4)
+#define PMU1_GRF_SOC_CON(i)		((i) * 4)
+#define PMU0_GRF_IO_RET_CON(i)		(0x20 + (i) * 4)
+
+/* PMU_SGRF */
+#define PMU0_SGRF_SOC_CON(i)		((i) * 4)
+#define PMU1_SGRF_SOC_CON(i)		((i) * 4)
+
+/* sys grf */
+#define GRF_CPU_STATUS0			0x0420
+
+#define CORES_PM_DISABLE		0x0
+
+/* Control modes for the core power domains */
+enum cores_pm_ctr_mode {
+	core_pwr_pd = 0,
+	core_pwr_wfi = 1,
+	core_pwr_wfi_int = 2
+};
+
+/* PMU0_PWR_CON */
+enum pmu0_pwr_con {
+	pmu0_powermode_en = 0,
+	pmu0_pmu1_pwr_bypass = 1,
+	pmu0_pmu1_bus_bypass = 2,
+	pmu0_wkup_bypass = 3,
+	pmu0_pmic_bypass = 4,
+	pmu0_reset_bypass = 5,
+	pmu0_freq_sw_bypass = 6,
+	pmu0_osc_dis_bypass = 7,
+	pmu0_pmu1_pwr_gt_en = 8,
+	pmu0_pmu1_pwr_gt_sft_en = 9,
+	pmu0_pmu1_mem_gt_sft_en = 10,
+	pmu0_pmu1_bus_idle_en = 11,
+	pmu0_pmu1_bus_idle_sft_en = 12,
+	pmu0_pmu1_biu_auto_en = 13,
+	pmu0_pwr_off_io_en = 14,
+};
+
+/* PMU1_PWR_CON */
+enum pmu1_pwr_con {
+	powermode_en = 0,
+	dsu_bypass = 1,
+	bus_bypass = 4,
+	ddr_bypass = 5,
+	pwrdn_bypass = 6,
+	cru_bypass = 7,
+	qch_bypass = 8,
+	core_bypass = 9,
+	cpu_sleep_wfi_dis = 12,
+};
+
+/* PMU1_DDR_PWR_CON */
+enum pmu1_ddr_pwr_con {
+	ddr_sref_en = 0,
+	ddr_sref_a_en = 1,
+	ddrio_ret_en = 2,
+	ddrio_ret_exit_en = 5,
+	ddrio_rstiov_en = 6,
+	ddrio_rstiov_exit_en = 7,
+	ddr_gating_a_en = 8,
+	ddr_gating_c_en = 9,
+	ddr_gating_p_en = 10,
+};
+
+/* PMU_CRU_PWR_CON */
+enum pmu1_cru_pwr_con {
+	alive_32k_en = 0,
+	osc_dis_en = 1,
+	wakeup_rst_en = 2,
+	input_clamp_en = 3,
+	alive_osc_mode_en = 4,
+	power_off_en = 5,
+	pwm_switch_en = 6,
+	pwm_gpio_ioe_en = 7,
+	pwm_switch_io = 8,
+	pd_clk_src_gate_en = 9,
+};
+
+/* PMU_PLLPD_CON */
+enum pmu1_pllpd_con {
+	B0PLL_PD_EN,
+	B1PLL_PD_EN,
+	LPLL_PD_EN,
+	D0APLL_PD_EN,
+	D0BPLL_PD_EN,
+	D1APLL_PD_EN,
+	D1BPLL_PD_EN,
+	D2APLL_PD_EN,
+	D2BPLL_PD_EN,
+	D3APLL_PD_EN,
+	D3BPLL_PD_EN,
+	V0PLL_PD_EN,
+	AUPLL_PD_EN,
+	GPLL_PD_EN,
+	CPLL_PD_EN,
+	NPLL_PD_EN,
+	PPLL_PD_EN = 0,
+	SPLL_PD_EN = 1,
+};
+
+enum pmu1_wakeup_int {
+	WAKEUP_CPU0_INT_EN,
+	WAKEUP_CPU1_INT_EN,
+	WAKEUP_CPU2_INT_EN,
+	WAKEUP_CPU3_INT_EN,
+	WAKEUP_CPU4_INT_EN,
+	WAKEUP_CPU5_INT_EN,
+	WAKEUP_CPU6_INT_EN,
+	WAKEUP_CPU7_INT_EN,
+	WAKEUP_GPIO0_INT_EN,
+	WAKEUP_SDMMC_EN,
+	WAKEUP_SDIO_EN,
+	WAKEUP_USBDEV_EN,
+	WAKEUP_UART0_EN,
+	WAKEUP_VAD_EN,
+	WAKEUP_TIMER_EN,
+	WAKEUP_SOC_INT_EN,
+	WAKEUP_TIMEROUT_EN,
+	WAKEUP_PMUMCU_CEC_EN = 20,
+};
+
+enum pmu2_dsu_auto_pwr_con {
+	dsu_pm_en = 0,
+	dsu_pm_int_wakeup_en = 1,
+	dsu_pm_sft_wakeup_en = 3,
+};
+
+enum pmu2_cpu_auto_pwr_con {
+	cpu_pm_en = 0,
+	cpu_pm_int_wakeup_en = 1,
+	cpu_pm_sft_wakeup_en = 3,
+};
+
+enum pmu2_core_auto_pwr_con {
+	core_pm_en = 0,
+	core_pm_int_wakeup_en = 1,
+	core_pm_int_wakeup_glb_msk = 2,
+	core_pm_sft_wakeup_en = 3,
+};
+
+enum pmu2_dsu_power_con {
+	DSU_PWRDN_EN,
+	DSU_PWROFF_EN,
+	BIT_FULL_EN,
+	DSU_RET_EN,
+	CLUSTER_CLK_SRC_GT_EN,
+};
+
+enum pmu2_core_power_con {
+	CORE_PWRDN_EN,
+	CORE_PWROFF_EN,
+	CORE_CPU_PWRDN_EN,
+	CORE_PWR_CNT_EN,
+};
+
+enum pmu2_cluster_idle_con {
+	IDLE_REQ_BIGCORE0_EN = 0,
+	IDLE_REQ_BIGCORE1_EN = 2,
+	IDLE_REQ_DSU_EN = 4,
+	IDLE_REQ_LITDSU_EN = 5,
+	IDLE_REQ_ADB400_CORE_QCH_EN = 6,
+};
+
+enum qos_id {
+	QOS_ISP0_MWO = 0,
+	QOS_ISP0_MRO = 1,
+	QOS_ISP1_MWO = 2,
+	QOS_ISP1_MRO = 3,
+	QOS_VICAP_M0 = 4,
+	QOS_VICAP_M1 = 5,
+	QOS_FISHEYE0 = 6,
+	QOS_FISHEYE1 = 7,
+	QOS_VOP_M0 = 8,
+	QOS_VOP_M1 = 9,
+	QOS_RKVDEC0 = 10,
+	QOS_RKVDEC1 = 11,
+	QOS_AV1 = 12,
+	QOS_RKVENC0_M0RO = 13,
+	QOS_RKVENC0_M1RO = 14,
+	QOS_RKVENC0_M2WO = 15,
+	QOS_RKVENC1_M0RO = 16,
+	QOS_RKVENC1_M1RO = 17,
+	QOS_RKVENC1_M2WO = 18,
+	QOS_DSU_M0 = 19,
+	QOS_DSU_M1 = 20,
+	QOS_DSU_MP = 21,
+	QOS_DEBUG = 22,
+	QOS_GPU_M0 = 23,
+	QOS_GPU_M1 = 24,
+	QOS_GPU_M2 = 25,
+	QOS_GPU_M3 = 26,
+	QOS_NPU1 = 27,
+	QOS_NPU0_MRO = 28,
+	QOS_NPU2 = 29,
+	QOS_NPU0_MWR = 30,
+	QOS_MCU_NPU = 31,
+	QOS_JPEG_DEC = 32,
+	QOS_JPEG_ENC0 = 33,
+	QOS_JPEG_ENC1 = 34,
+	QOS_JPEG_ENC2 = 35,
+	QOS_JPEG_ENC3 = 36,
+	QOS_RGA2_MRO = 37,
+	QOS_RGA2_MWO = 38,
+	QOS_RGA3_0 = 39,
+	QOS_RGA3_1 = 40,
+	QOS_VDPU = 41,
+	QOS_IEP = 42,
+	QOS_HDCP0 = 43,
+	QOS_HDCP1 = 44,
+	QOS_HDMIRX = 45,
+	QOS_GIC600_M0 = 46,
+	QOS_GIC600_M1 = 47,
+	QOS_MMU600PCIE_TCU = 48,
+	QOS_MMU600PHP_TBU = 49,
+	QOS_MMU600PHP_TCU = 50,
+	QOS_USB3_0 = 51,
+	QOS_USB3_1 = 52,
+	QOS_USBHOST_0 = 53,
+	QOS_USBHOST_1 = 54,
+	QOS_EMMC = 55,
+	QOS_FSPI = 56,
+	QOS_SDIO = 57,
+	QOS_DECOM = 58,
+	QOS_DMAC0 = 59,
+	QOS_DMAC1 = 60,
+	QOS_DMAC2 = 61,
+	QOS_GIC600M = 62,
+	QOS_DMA2DDR = 63,
+	QOS_MCU_DDR = 64,
+	QOS_VAD = 65,
+	QOS_MCU_PMU = 66,
+	QOS_CRYPTOS = 67,
+	QOS_CRYPTONS = 68,
+	QOS_DCF = 69,
+	QOS_SDMMC = 70,
+};
+
+enum pmu2_pdid {
+	PD_GPU = 0,
+	PD_NPU = 1,
+	PD_VCODEC = 2,
+	PD_NPUTOP = 3,
+	PD_NPU1 = 4,
+	PD_NPU2 = 5,
+	PD_VENC0 = 6,
+	PD_VENC1 = 7,
+	PD_RKVDEC0 = 8,
+	PD_RKVDEC1 = 9,
+	PD_VDPU = 10,
+	PD_RGA30 = 11,
+	PD_AV1 = 12,
+	PD_VI = 13,
+	PD_FEC = 14,
+	PD_ISP1 = 15,
+	PD_RGA31 = 16,
+	PD_VOP = 17,
+	PD_VO0 = 18,
+	PD_VO1 = 19,
+	PD_AUDIO = 20,
+	PD_PHP = 21,
+	PD_GMAC = 22,
+	PD_PCIE = 23,
+	PD_NVM = 24,
+	PD_NVM0 = 25,
+	PD_SDIO = 26,
+	PD_USB = 27,
+	PD_SECURE = 28,
+	PD_SDMMC = 29,
+	PD_CRYPTO = 30,
+	PD_CENTER = 31,
+	PD_DDR01 = 32,
+	PD_DDR23 = 33,
+};
+
+enum pmu2_pd_repair_id {
+	PD_RPR_PMU = 0,
+	PD_RPR_GPU = 1,
+	PD_RPR_NPUTOP = 2,
+	PD_RPR_NPU1 = 3,
+	PD_RPR_NPU2 = 4,
+	PD_RPR_VENC0 = 5,
+	PD_RPR_VENC1 = 6,
+	PD_RPR_RKVDEC0 = 7,
+	PD_RPR_RKVDEC1 = 8,
+	PD_RPR_VDPU = 9,
+	PD_RPR_RGA30 = 10,
+	PD_RPR_AV1 = 11,
+	PD_RPR_VI = 12,
+	PD_RPR_FEC = 13,
+	PD_RPR_ISP1 = 14,
+	PD_RPR_RGA31 = 15,
+	PD_RPR_VOP = 16,
+	PD_RPR_VO0 = 17,
+	PD_RPR_VO1 = 18,
+	PD_RPR_AUDIO = 19,
+	PD_RPR_PHP = 20,
+	PD_RPR_GMAC = 21,
+	PD_RPR_PCIE = 22,
+	PD_RPR_NVM0 = 23,
+	PD_RPR_SDIO = 24,
+	PD_RPR_USB = 25,
+	PD_RPR_SDMMC = 26,
+	PD_RPR_CRYPTO = 27,
+	PD_RPR_CENTER = 28,
+	PD_RPR_DDR01 = 29,
+	PD_RPR_DDR23 = 30,
+	PD_RPR_BUS = 31,
+};
+
+enum pmu2_bus_id {
+	BUS_ID_GPU = 0,
+	BUS_ID_NPUTOP = 1,
+	BUS_ID_NPU1 = 2,
+	BUS_ID_NPU2 = 3,
+	BUS_ID_RKVENC0 = 4,
+	BUS_ID_RKVENC1 = 5,
+	BUS_ID_RKVDEC0 = 6,
+	BUS_ID_RKVDEC1 = 7,
+	BUS_ID_VDPU = 8,
+	BUS_ID_AV1 = 9,
+	BUS_ID_VI = 10,
+	BUS_ID_ISP = 11,
+	BUS_ID_RGA31 = 12,
+	BUS_ID_VOP = 13,
+	BUS_ID_VOP_CHANNEL = 14,
+	BUS_ID_VO0 = 15,
+	BUS_ID_VO1 = 16,
+	BUS_ID_AUDIO = 17,
+	BUS_ID_NVM = 18,
+	BUS_ID_SDIO = 19,
+	BUS_ID_USB = 20,
+	BUS_ID_PHP = 21,
+	BUS_ID_VO1USBTOP = 22,
+	BUS_ID_SECURE = 23,
+	BUS_ID_SECURE_CENTER_CHANNEL = 24,
+	BUS_ID_SECURE_VO1USB_CHANNEL = 25,
+	BUS_ID_CENTER = 26,
+	BUS_ID_CENTER_CHANNEL = 27,
+	BUS_ID_MSCH0 = 28,
+	BUS_ID_MSCH1 = 29,
+	BUS_ID_MSCH2 = 30,
+	BUS_ID_MSCH3 = 31,
+	BUS_ID_MSCH = 32,
+	BUS_ID_BUS = 33,
+	BUS_ID_TOP = 34,
+};
+
+enum pmu2_mem_st {
+	PD_NPU_TOP_MEM_ST = 11,
+	PD_NPU1_MEM_ST = 12,
+	PD_NPU2_MEM_ST = 13,
+	PD_VENC0_MEM_ST = 14,
+	PD_VENC1_MEM_ST = 15,
+	PD_RKVDEC0_MEM_ST = 16,
+	PD_RKVDEC1_MEM_ST = 17,
+	PD_RGA30_MEM_ST = 19,
+	PD_AV1_MEM_ST = 20,
+	PD_VI_MEM_ST = 21,
+	PD_FEC_MEM_ST = 22,
+	PD_ISP1_MEM_ST = 23,
+	PD_RGA31_MEM_ST = 24,
+	PD_VOP_MEM_ST = 25,
+	PD_VO0_MEM_ST = 26,
+	PD_VO1_MEM_ST = 27,
+	PD_AUDIO_MEM_ST = 28,
+	PD_PHP_MEM_ST = 29,
+	PD_GMAC_MEM_ST = 30,
+	PD_PCIE_MEM_ST = 31,
+	PD_NVM0_MEM_ST = 33,
+	PD_SDIO_MEM_ST = 34,
+	PD_USB_MEM_ST = 35,
+	PD_SDMMC_MEM_ST = 37,
+};
+
+enum pmu2_qid {
+	QID_PHPMMU_TBU = 0,
+	QID_PHPMMU_TCU = 1,
+	QID_PCIEMMU_TBU0 = 2,
+	QID_PCIEMU_TCU = 3,
+	QID_PHP_GICITS = 4,
+	QID_BUS_GICITS0 = 5,
+	QID_BUS_GICITS1 = 6,
+};
+
+/* PMU_DSU_PWR_CON */
+enum pmu_dsu_pwr_con {
+	DSU_PWRDN_ENA = 2,
+	DSU_PWROFF_ENA,
+	DSU_RET_ENA = 6,
+	CLUSTER_CLK_SRC_GATE_ENA,
+	DSU_PWR_CON_END
+};
+
+enum cpu_power_state {
+	CPU_POWER_ON,
+	CPU_POWER_OFF,
+	CPU_EMULATION_OFF,
+	CPU_RETENTION,
+	CPU_DEBUG
+};
+
+enum dsu_power_state {
+	DSU_POWER_ON,
+	CLUSTER_TRANSFER_IDLE,
+	DSU_POWER_DOWN,
+	DSU_OFF,
+	DSU_WAKEUP,
+	DSU_POWER_UP,
+	CLUSTER_TRANSFER_RESUME,
+	DSU_FUNCTION_RETENTION
+};
+
+/* PMU2_CLUSTER_ST (0x8080) */
+enum pmu2_cluster_sts_bits {
+	pd_cpu0_dwn = 0,
+	pd_cpu1_dwn,
+	pd_cpu2_dwn,
+	pd_cpu3_dwn,
+	pd_cpu4_dwn,
+	pd_cpu5_dwn,
+	pd_cpu6_dwn,
+	pd_cpu7_dwn,
+	pd_core0_dwn,
+	pd_core1_dwn
+};
+
+#define CLUSTER_STS_NONBOOT_CPUS_DWN	0xfe
+
+enum cpu_off_trigger {
+	CPU_OFF_TRIGGER_WFE = 0,
+	CPU_OFF_TRIGGER_REQ_EML,
+	CPU_OFF_TRIGGER_REQ_WFI,
+	CPU_OFF_TRIGGER_REQ_WFI_NBT_CPU,
+	CPU_OFF_TRIGGER_REQ_WFI_NBT_CPU_SRAM
+};
+
+/*****************************************************************************
+ * power domain on or off
+ *****************************************************************************/
+enum pmu_pd_state {
+	pmu_pd_on = 0,
+	pmu_pd_off = 1
+};
+
+enum bus_state {
+	bus_active,
+	bus_idle,
+};
+
+#define RK_CPU_STATUS_OFF		0
+#define RK_CPU_STATUS_ON		1
+#define RK_CPU_STATUS_BUSY		-1
+
+#define MAX_WAIT_COUNT			500
+
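+/*
+ * The bus idle status/ack registers pack 32 bus ids per 32-bit word; these
+ * helpers select the word with (id / 32) and the bit with (id % 32),
+ * returning 0 or 1.
+ */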
+#define pmu_bus_idle_st(id)	\
+	(!!(mmio_read_32(PMU_BASE + PMU2_BUS_IDLE_ST((id) / 32)) & BIT((id) % 32)))
+
+#define pmu_bus_idle_ack(id)	\
+	(!!(mmio_read_32(PMU_BASE + PMU2_BUS_IDLE_ACK((id) / 32)) & BIT((id) % 32)))
+
+void pm_pll_wait_lock(uint32_t pll_base);
+#endif /* __PMU_H__ */
diff --git a/plat/rockchip/rk3588/drivers/scmi/rk3588_clk.c b/plat/rockchip/rk3588/drivers/scmi/rk3588_clk.c
new file mode 100644
index 0000000..ab3af5f
--- /dev/null
+++ b/plat/rockchip/rk3588/drivers/scmi/rk3588_clk.c
@@ -0,0 +1,2463 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <drivers/delay_timer.h>
+#include <drivers/scmi.h>
+#include <lib/mmio.h>
+#include <platform_def.h>
+
+#include <plat_private.h>
+#include "rk3588_clk.h"
+#include <rockchip_sip_svc.h>
+#include <scmi_clock.h>
+#include <soc.h>
+
+enum pll_type_sel {
+	PLL_SEL_AUTO, /* all plls (normal pll or pvtpll) */
+	PLL_SEL_PVT,
+	PLL_SEL_NOR,
+	PLL_SEL_AUTO_NOR /* all normal plls (apll/gpll/npll) */
+};
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
+#define RK3588_CPUL_PVTPLL_CON0_L	0x40
+#define RK3588_CPUL_PVTPLL_CON0_H	0x44
+#define RK3588_CPUL_PVTPLL_CON1		0x48
+#define RK3588_CPUL_PVTPLL_CON2		0x4c
+#define RK3588_CPUB_PVTPLL_CON0_L	0x00
+#define RK3588_CPUB_PVTPLL_CON0_H	0x04
+#define RK3588_CPUB_PVTPLL_CON1		0x08
+#define RK3588_CPUB_PVTPLL_CON2		0x0c
+#define RK3588_DSU_PVTPLL_CON0_L	0x60
+#define RK3588_DSU_PVTPLL_CON0_H	0x64
+#define RK3588_DSU_PVTPLL_CON1		0x70
+#define RK3588_DSU_PVTPLL_CON2		0x74
+#define RK3588_GPU_PVTPLL_CON0_L	0x00
+#define RK3588_GPU_PVTPLL_CON0_H	0x04
+#define RK3588_GPU_PVTPLL_CON1		0x08
+#define RK3588_GPU_PVTPLL_CON2		0x0c
+#define RK3588_NPU_PVTPLL_CON0_L	0x0c
+#define RK3588_NPU_PVTPLL_CON0_H	0x10
+#define RK3588_NPU_PVTPLL_CON1		0x14
+#define RK3588_NPU_PVTPLL_CON2		0x18
+#define RK3588_PVTPLL_MAX_LENGTH	0x3f
+
+#define GPLL_RATE			1188000000
+#define CPLL_RATE			1500000000
+#define SPLL_RATE			702000000
+#define AUPLL_RATE			786431952
+#define NPLL_RATE			850000000
+
+#define MAX_RATE_TABLE			16
+
+#define CLKDIV_6BITS_SHF(div, shift)	BITS_WITH_WMASK(div, 0x3fU, shift)
+#define CLKDIV_5BITS_SHF(div, shift)	BITS_WITH_WMASK(div, 0x1fU, shift)
+#define CLKDIV_4BITS_SHF(div, shift)	BITS_WITH_WMASK(div, 0xfU, shift)
+#define CLKDIV_3BITS_SHF(div, shift)	BITS_WITH_WMASK(div, 0x7U, shift)
+#define CLKDIV_2BITS_SHF(div, shift)	BITS_WITH_WMASK(div, 0x3U, shift)
+#define CLKDIV_1BITS_SHF(div, shift)	BITS_WITH_WMASK(div, 0x1U, shift)
+
+#define CPU_PLL_PATH_SLOWMODE		BITS_WITH_WMASK(0U, 0x3U, 0)
+#define CPU_PLL_PATH_NORMAL		BITS_WITH_WMASK(1U, 0x3U, 0)
+#define CPU_PLL_PATH_DEEP_SLOW		BITS_WITH_WMASK(2U, 0x3U, 0)
+
+#define CRU_PLL_POWER_DOWN		BIT_WITH_WMSK(13)
+#define CRU_PLL_POWER_UP		WMSK_BIT(13)
+
+/* core_i: from gpll or apll */
+#define CLK_CORE_I_SEL_APLL		WMSK_BIT(6)
+#define CLK_CORE_I_SEL_GPLL		BIT_WITH_WMSK(6)
+
+/*
+ * clk_core: from the normal pll (core_i: gpll or apll) path, or passed
+ * directly from apll.
+ */
+
+/* cpul clk path */
+#define CPUL_CLK_PATH_NOR_XIN		BITS_WITH_WMASK(0U, 0x3U, 14)
+#define CPUL_CLK_PATH_NOR_GPLL		BITS_WITH_WMASK(1U, 0x3U, 14)
+#define CPUL_CLK_PATH_NOR_LPLL		BITS_WITH_WMASK(2U, 0x3U, 14)
+
+#define CPUL_CLK_PATH_LPLL		(BITS_WITH_WMASK(0U, 0x3U, 5) | \
+					BITS_WITH_WMASK(0U, 0x3U, 12))
+#define CPUL_CLK_PATH_DIR_LPLL		(BITS_WITH_WMASK(0x1, 0x3U, 5) | \
+					BITS_WITH_WMASK(1U, 0x3U, 12))
+#define CPUL_CLK_PATH_PVTPLL		(BITS_WITH_WMASK(0x2, 0x3U, 5) | \
+					BITS_WITH_WMASK(2U, 0x3U, 12))
+
+#define CPUL_PVTPLL_PATH_DEEP_SLOW	BITS_WITH_WMASK(0U, 0x1U, 14)
+#define CPUL_PVTPLL_PATH_PVTPLL		BITS_WITH_WMASK(1U, 0x1U, 14)
+
+/* cpub01 clk path */
+#define CPUB01_CLK_PATH_NOR_XIN		BITS_WITH_WMASK(0U, 0x3U, 6)
+#define CPUB01_CLK_PATH_NOR_GPLL	BITS_WITH_WMASK(1U, 0x3U, 6)
+#define CPUB01_CLK_PATH_NOR_B0PLL	BITS_WITH_WMASK(2U, 0x3U, 6)
+
+#define CPUB01_CLK_PATH_B0PLL		BITS_WITH_WMASK(0U, 0x3U, 13)
+#define CPUB01_CLK_PATH_DIR_B0PLL	BITS_WITH_WMASK(1U, 0x3U, 13)
+#define CPUB01_CLK_PATH_B0_PVTPLL	BITS_WITH_WMASK(2U, 0x3U, 13)
+
+#define CPUB01_CLK_PATH_B1PLL		BITS_WITH_WMASK(0U, 0x3U, 5)
+#define CPUB01_CLK_PATH_DIR_B1PLL	BITS_WITH_WMASK(1U, 0x3U, 5)
+#define CPUB01_CLK_PATH_B1_PVTPLL	BITS_WITH_WMASK(2U, 0x3U, 5)
+
+#define CPUB01_PVTPLL_PATH_DEEP_SLOW	BITS_WITH_WMASK(0U, 0x1U, 2)
+#define CPUB01_PVTPLL_PATH_PVTPLL	BITS_WITH_WMASK(1U, 0x1U, 2)
+
+#define CPUB_PCLK_PATH_100M		BITS_WITH_WMASK(0U, 0x3U, 0)
+#define CPUB_PCLK_PATH_50M		BITS_WITH_WMASK(1U, 0x3U, 0)
+#define CPUB_PCLK_PATH_24M		BITS_WITH_WMASK(2U, 0x3U, 0)
+
+/* dsu clk path */
+#define SCLK_DSU_PATH_NOR_B0PLL		BITS_WITH_WMASK(0U, 0x3U, 12)
+#define SCLK_DSU_PATH_NOR_B1PLL		BITS_WITH_WMASK(1U, 0x3U, 12)
+#define SCLK_DSU_PATH_NOR_LPLL		BITS_WITH_WMASK(2U, 0x3U, 12)
+#define SCLK_DSU_PATH_NOR_GPLL		BITS_WITH_WMASK(3U, 0x3U, 12)
+
+#define DSU_PVTPLL_PATH_DEEP_SLOW	BITS_WITH_WMASK(0U, 0x1U, 15)
+#define DSU_PVTPLL_PATH_PVTPLL		BITS_WITH_WMASK(1U, 0x1U, 15)
+
+#define SCLK_DSU_PATH_NOR_PLL		WMSK_BIT(0)
+#define SCLK_DSU_PATH_PVTPLL		BIT_WITH_WMSK(0)
+
+/* npu clk path */
+#define NPU_CLK_PATH_NOR_GPLL		BITS_WITH_WMASK(0U, 0x7U, 7)
+#define NPU_CLK_PATH_NOR_CPLL		BITS_WITH_WMASK(1U, 0x7U, 7)
+#define NPU_CLK_PATH_NOR_AUPLL		BITS_WITH_WMASK(2U, 0x7U, 7)
+#define NPU_CLK_PATH_NOR_NPLL		BITS_WITH_WMASK(3U, 0x7U, 7)
+#define NPU_CLK_PATH_NOR_SPLL		BITS_WITH_WMASK(4U, 0x7U, 7)
+
+#define NPU_CLK_PATH_NOR_PLL		WMSK_BIT(0)
+#define NPU_CLK_PATH_PVTPLL		BIT_WITH_WMSK(0)
+
+/* gpu clk path */
+#define GPU_CLK_PATH_NOR_GPLL		BITS_WITH_WMASK(0U, 0x7U, 5)
+#define GPU_CLK_PATH_NOR_CPLL		BITS_WITH_WMASK(1U, 0x7U, 5)
+#define GPU_CLK_PATH_NOR_AUPLL		BITS_WITH_WMASK(2U, 0x7U, 5)
+#define GPU_CLK_PATH_NOR_NPLL		BITS_WITH_WMASK(3U, 0x7U, 5)
+#define GPU_CLK_PATH_NOR_SPLL		BITS_WITH_WMASK(4U, 0x7U, 5)
+#define GPU_CLK_PATH_NOR_PLL		WMSK_BIT(14)
+#define GPU_CLK_PATH_PVTPLL		BIT_WITH_WMSK(14)
+
+#define PVTPLL_NEED(type, length)	(((type) == PLL_SEL_PVT || \
+					  (type) == PLL_SEL_AUTO) && \
+					 (length))
+
+struct pvtpll_table {
+	unsigned int rate;
+	uint32_t length;
+	uint32_t ring_sel;
+};
+
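+/*
+ * Rates set via the PVTPLL path cannot be read back from the hardware, so
+ * the last programmed rate of each such clock is cached here alongside the
+ * per-clock PVTPLL tables.
+ */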
+struct sys_clk_info_t {
+	struct pvtpll_table *cpul_table;
+	struct pvtpll_table *cpub01_table;
+	struct pvtpll_table *cpub23_table;
+	struct pvtpll_table *gpu_table;
+	struct pvtpll_table *npu_table;
+	unsigned int cpul_rate_count;
+	unsigned int cpub01_rate_count;
+	unsigned int cpub23_rate_count;
+	unsigned int gpu_rate_count;
+	unsigned int npu_rate_count;
+	unsigned long cpul_rate;
+	unsigned long dsu_rate;
+	unsigned long cpub01_rate;
+	unsigned long cpub23_rate;
+	unsigned long gpu_rate;
+	unsigned long npu_rate;
+};
+
+#define RK3588_SCMI_CLOCK(_id, _name, _data, _table, _cnt, _is_s)	\
+{									\
+	.id	= _id,							\
+	.name = _name,							\
+	.clk_ops = _data,						\
+	.rate_table = _table,						\
+	.rate_cnt = _cnt,						\
+	.is_security = _is_s,						\
+}
+
+#define ROCKCHIP_PVTPLL(_rate, _sel, _len)				\
+{									\
+	.rate = _rate##U,						\
+	.ring_sel = _sel,						\
+	.length = _len,							\
+}
+
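+/*
+ * Table entries with length == 0 have no PVTPLL ring configuration; for
+ * those rates the clock stays on the normal PLL path (see PVTPLL_NEED()).
+ */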
+static struct pvtpll_table rk3588_cpul_pvtpll_table[] = {
+	/* rate_hz, ring_sel, length */
+	ROCKCHIP_PVTPLL(1800000000, 1, 15),
+	ROCKCHIP_PVTPLL(1704000000, 1, 15),
+	ROCKCHIP_PVTPLL(1608000000, 1, 15),
+	ROCKCHIP_PVTPLL(1416000000, 1, 15),
+	ROCKCHIP_PVTPLL(1200000000, 1, 17),
+	ROCKCHIP_PVTPLL(1008000000, 1, 22),
+	ROCKCHIP_PVTPLL(816000000, 1, 32),
+	ROCKCHIP_PVTPLL(600000000, 0, 0),
+	ROCKCHIP_PVTPLL(408000000, 0, 0),
+	{ /* sentinel */ },
+};
+
+static struct pvtpll_table rk3588_cpub0_pvtpll_table[] = {
+	/* rate_hz, ring_sel, length */
+	ROCKCHIP_PVTPLL(2400000000, 1, 11),
+	ROCKCHIP_PVTPLL(2352000000, 1, 11),
+	ROCKCHIP_PVTPLL(2304000000, 1, 11),
+	ROCKCHIP_PVTPLL(2256000000, 1, 11),
+	ROCKCHIP_PVTPLL(2208000000, 1, 11),
+	ROCKCHIP_PVTPLL(2112000000, 1, 11),
+	ROCKCHIP_PVTPLL(2016000000, 1, 11),
+	ROCKCHIP_PVTPLL(1800000000, 1, 11),
+	ROCKCHIP_PVTPLL(1608000000, 1, 11),
+	ROCKCHIP_PVTPLL(1416000000, 1, 13),
+	ROCKCHIP_PVTPLL(1200000000, 1, 17),
+	ROCKCHIP_PVTPLL(1008000000, 1, 23),
+	ROCKCHIP_PVTPLL(816000000, 1, 33),
+	ROCKCHIP_PVTPLL(600000000, 0, 0),
+	ROCKCHIP_PVTPLL(408000000, 0, 0),
+	{ /* sentinel */ },
+};
+
+static struct pvtpll_table
+	rk3588_cpub1_pvtpll_table[ARRAY_SIZE(rk3588_cpub0_pvtpll_table)] = { 0 };
+
+static struct pvtpll_table rk3588_gpu_pvtpll_table[] = {
+	/* rate_hz, ring_sel, length */
+	ROCKCHIP_PVTPLL(1000000000, 1, 12),
+	ROCKCHIP_PVTPLL(900000000, 1, 12),
+	ROCKCHIP_PVTPLL(800000000, 1, 12),
+	ROCKCHIP_PVTPLL(700000000, 1, 13),
+	ROCKCHIP_PVTPLL(600000000, 1, 17),
+	ROCKCHIP_PVTPLL(500000000, 1, 25),
+	ROCKCHIP_PVTPLL(400000000, 1, 38),
+	ROCKCHIP_PVTPLL(300000000, 1, 55),
+	ROCKCHIP_PVTPLL(200000000, 0, 0),
+	{ /* sentinel */ },
+};
+
+static struct pvtpll_table rk3588_npu_pvtpll_table[] = {
+	/* rate_hz, ring_sel, length */
+	ROCKCHIP_PVTPLL(1000000000, 1, 12),
+	ROCKCHIP_PVTPLL(900000000, 1, 12),
+	ROCKCHIP_PVTPLL(800000000, 1, 12),
+	ROCKCHIP_PVTPLL(700000000, 1, 13),
+	ROCKCHIP_PVTPLL(600000000, 1, 17),
+	ROCKCHIP_PVTPLL(500000000, 1, 25),
+	ROCKCHIP_PVTPLL(400000000, 1, 38),
+	ROCKCHIP_PVTPLL(300000000, 1, 55),
+	ROCKCHIP_PVTPLL(200000000, 0, 0),
+	{ /* sentinel */ },
+};
+
+static unsigned long rk3588_cpul_rates[] = {
+	408000000, 600000000, 816000000, 1008000000,
+	1200000000, 1416000000, 1608000000, 1800000063,
+};
+
+static unsigned long rk3588_cpub_rates[] = {
+	408000000, 816000000, 1008000000, 1200000000,
+	1416000000, 1608000000, 1800000000, 2016000000,
+	2208000000, 2304000000, 2400000063
+};
+
+static unsigned long rk3588_gpu_rates[] = {
+	200000000, 300000000, 400000000, 500000000,
+	600000000, 700000000, 800000000, 900000000,
+	1000000063
+};
+
+static unsigned long rk3588_sbus_rates[] = {
+	24000000, 50000000, 100000000, 150000000, 200000000,
+	250000000, 350000000, 700000000
+};
+
+static unsigned long rk3588_sdmmc_rates[] = {
+	400000, 24000000, 50000000, 100000000, 150000000, 200000000,
+	300000000, 400000000, 600000000, 700000000
+};
+
+static struct sys_clk_info_t sys_clk_info;
+static int clk_scmi_dsu_set_rate(rk_scmi_clock_t *clock, unsigned long rate);
+
+static struct pvtpll_table *rkclk_get_pvtpll_config(struct pvtpll_table *table,
+						    unsigned int count,
+						    unsigned int freq_hz)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		if (freq_hz == table[i].rate)
+			return &table[i];
+	}
+	return NULL;
+}
+
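+/*
+ * The little-core clock either runs from its PVTPLL ring oscillator (table
+ * entries with a non-zero length) or from GPLL divided down; the big-core,
+ * DSU, GPU and NPU set_rate helpers below follow the same pattern.
+ */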
+static int clk_cpul_set_rate(unsigned long rate, enum pll_type_sel type)
+{
+	struct pvtpll_table *pvtpll;
+	int div;
+
+	if (rate == 0)
+		return SCMI_INVALID_PARAMETERS;
+
+	pvtpll = rkclk_get_pvtpll_config(sys_clk_info.cpul_table,
+					 sys_clk_info.cpul_rate_count, rate);
+	if (pvtpll == NULL)
+		return SCMI_INVALID_PARAMETERS;
+
+	/* set lpll */
+	if (PVTPLL_NEED(type, pvtpll->length) != 0) {
+		/* set clock gating interval */
+		mmio_write_32(LITCOREGRF_BASE + RK3588_CPUL_PVTPLL_CON2,
+			      0x00040000);
+		/* set ring sel */
+		mmio_write_32(LITCOREGRF_BASE + RK3588_CPUL_PVTPLL_CON0_L,
+			      0x07000000 | (pvtpll->ring_sel << 8));
+		/* set length */
+		mmio_write_32(LITCOREGRF_BASE + RK3588_CPUL_PVTPLL_CON0_H,
+			      0x003f0000 | pvtpll->length);
+		/* set cal cnt = 24, T = 1us */
+		mmio_write_32(LITCOREGRF_BASE + RK3588_CPUL_PVTPLL_CON1,
+			      0x18);
+		/* enable pvtpll */
+		mmio_write_32(LITCOREGRF_BASE + RK3588_CPUL_PVTPLL_CON0_L,
+			      0x00020002);
+		/* start monitor */
+		mmio_write_32(LITCOREGRF_BASE + RK3588_CPUL_PVTPLL_CON0_L,
+			      0x00010001);
+		/* set corel mux pvtpll */
+		mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(7),
+			      CPUL_PVTPLL_PATH_PVTPLL);
+		mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(6),
+			      CPUL_CLK_PATH_PVTPLL);
+		mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(7),
+			      CPUL_CLK_PATH_PVTPLL);
+		return 0;
+	}
+
+	/* set clk corel div */
+	div = DIV_ROUND_UP(GPLL_RATE, rate) - 1;
+	mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(6),
+		      CLKDIV_5BITS_SHF(div, 0) | CLKDIV_5BITS_SHF(div, 7));
+	mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(7),
+		      CLKDIV_5BITS_SHF(div, 0) | CLKDIV_5BITS_SHF(div, 7));
+	/* set corel mux gpll */
+	mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(5),
+		      CPUL_CLK_PATH_NOR_GPLL);
+	mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(6),
+		      CPUL_CLK_PATH_LPLL);
+	mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(7),
+		      CPUL_CLK_PATH_LPLL);
+
+	return 0;
+}
+
+static int clk_scmi_cpul_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	int ret;
+
+	if (rate == 0)
+		return SCMI_INVALID_PARAMETERS;
+
+	ret = clk_cpul_set_rate(rate, PLL_SEL_AUTO);
+	if (ret == 0) {
+		sys_clk_info.cpul_rate = rate;
+		ret = clk_scmi_dsu_set_rate(clock, rate);
+	}
+
+	return ret;
+}
+
+static unsigned long rk3588_lpll_get_rate(void)
+{
+	unsigned int m, p, s, k;
+	uint64_t rate64 = 24000000, postdiv;
+	int mode;
+
+	mode = (mmio_read_32(DSUCRU_BASE + CRU_CLKSEL_CON(5)) >> 14) &
+	       0x3;
+
+	if (mode == 0)
+		return rate64;
+
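+	/*
+	 * Fout = (24 MHz * M) / P >> S, plus the fractional contribution
+	 * 24 MHz * K / (P * 65535) added before the final shift by S.
+	 */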
+	m = (mmio_read_32(DSUCRU_BASE + CRU_PLL_CON(16)) >>
+		 CRU_PLLCON0_M_SHIFT) &
+		CRU_PLLCON0_M_MASK;
+	p = (mmio_read_32(DSUCRU_BASE + CRU_PLL_CON(17)) >>
+		    CRU_PLLCON1_P_SHIFT) &
+		   CRU_PLLCON1_P_MASK;
+	s = (mmio_read_32(DSUCRU_BASE + CRU_PLL_CON(17)) >>
+		  CRU_PLLCON1_S_SHIFT) &
+		 CRU_PLLCON1_S_MASK;
+	k = (mmio_read_32(DSUCRU_BASE + CRU_PLL_CON(18)) >>
+		    CRU_PLLCON2_K_SHIFT) &
+		   CRU_PLLCON2_K_MASK;
+
+	rate64 *= m;
+	rate64 = rate64 / p;
+
+	if (k != 0) {
+		/* fractional mode */
+		uint64_t frac_rate64 = 24000000ULL * k;
+
+		postdiv = p * 65535;
+		frac_rate64 = frac_rate64 / postdiv;
+		rate64 += frac_rate64;
+	}
+	rate64 = rate64 >> s;
+
+	return (unsigned long)rate64;
+}
+
+static unsigned long clk_scmi_cpul_get_rate(rk_scmi_clock_t *clock)
+{
+	int src, div;
+
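+	/*
+	 * Bits [6:5] of CLKSEL_CON(6) select the core clock path; value 2
+	 * means PVTPLL, whose rate can only be reported from the cache.
+	 */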
+	src = mmio_read_32(DSUCRU_BASE + CRU_CLKSEL_CON(6)) & 0x0060;
+	src = src >> 5;
+	if (src == 2) {
+		return sys_clk_info.cpul_rate;
+	} else {
+		src = mmio_read_32(DSUCRU_BASE + CRU_CLKSEL_CON(5)) & 0xc000;
+		src = src >> 14;
+		div = mmio_read_32(DSUCRU_BASE + CRU_CLKSEL_CON(6)) & 0x1f;
+		switch (src) {
+		case 0:
+			return 24000000;
+		case 1:
+			/* Return the cached rate to match what was set */
+			if (sys_clk_info.cpul_rate)
+				return sys_clk_info.cpul_rate;
+			else
+				return GPLL_RATE / (div + 1);
+		case 2:
+			return rk3588_lpll_get_rate();
+		default:
+			return 0;
+		}
+	}
+}
+
+static int clk_scmi_cpul_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static void clk_scmi_b0pll_disable(void)
+{
+	static bool is_b0pll_disabled;
+
+	if (is_b0pll_disabled)
+		return;
+
+	/* set coreb01 mux gpll */
+	mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(0),
+		      CPUB01_CLK_PATH_NOR_GPLL);
+	/* pll enter slow mode */
+	mmio_write_32(BIGCORE0CRU_BASE + CRU_MODE_CON0, CPU_PLL_PATH_SLOWMODE);
+	/* set pll power down */
+	mmio_write_32(BIGCORE0CRU_BASE + CRU_PLL_CON(1), CRU_PLL_POWER_DOWN);
+
+	is_b0pll_disabled = true;
+}
+
+static int clk_cpub01_set_rate(unsigned long rate, enum pll_type_sel type)
+{
+	struct pvtpll_table *pvtpll;
+	int div;
+
+	if (rate == 0)
+		return SCMI_INVALID_PARAMETERS;
+
+	pvtpll = rkclk_get_pvtpll_config(sys_clk_info.cpub01_table,
+					 sys_clk_info.cpub01_rate_count, rate);
+	if (pvtpll == NULL)
+		return SCMI_INVALID_PARAMETERS;
+
+	/* set b0pll */
+	if (PVTPLL_NEED(type, pvtpll->length)) {
+		/* set clock gating interval */
+		mmio_write_32(BIGCORE0GRF_BASE + RK3588_CPUB_PVTPLL_CON2,
+			      0x00040000);
+		/* set ring sel */
+		mmio_write_32(BIGCORE0GRF_BASE + RK3588_CPUB_PVTPLL_CON0_L,
+			      0x07000000 | (pvtpll->ring_sel << 8));
+		/* set length */
+		mmio_write_32(BIGCORE0GRF_BASE + RK3588_CPUB_PVTPLL_CON0_H,
+			      0x003f0000 | pvtpll->length);
+		/* set cal cnt = 24, T = 1us */
+		mmio_write_32(BIGCORE0GRF_BASE + RK3588_CPUB_PVTPLL_CON1,
+			      0x18);
+		/* enable pvtpll */
+		mmio_write_32(BIGCORE0GRF_BASE + RK3588_CPUB_PVTPLL_CON0_L,
+			      0x00020002);
+		/* start monitor */
+		mmio_write_32(BIGCORE0GRF_BASE + RK3588_CPUB_PVTPLL_CON0_L,
+			      0x00010001);
+		/* set core mux pvtpll */
+		mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(2),
+			      CPUB01_PVTPLL_PATH_PVTPLL);
+		mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(0),
+			      CPUB01_CLK_PATH_B0_PVTPLL);
+		mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(1),
+			      CPUB01_CLK_PATH_B1_PVTPLL);
+		goto out;
+	}
+
+	/* set clk coreb01 div */
+	div = DIV_ROUND_UP(GPLL_RATE, rate) - 1;
+	mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(0),
+		      CLKDIV_5BITS_SHF(div, 8));
+	mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(1),
+		      CLKDIV_5BITS_SHF(div, 0));
+	/* set coreb01 mux gpll */
+	mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(0),
+		      CPUB01_CLK_PATH_NOR_GPLL);
+	mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(0),
+		      CPUB01_CLK_PATH_B0PLL);
+	mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(1),
+		      CPUB01_CLK_PATH_B1PLL);
+
+out:
+	clk_scmi_b0pll_disable();
+
+	return 0;
+}
+
+static int clk_scmi_cpub01_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	int ret;
+
+	if (rate == 0)
+		return SCMI_INVALID_PARAMETERS;
+
+	ret = clk_cpub01_set_rate(rate, PLL_SEL_AUTO);
+	if (ret == 0)
+		sys_clk_info.cpub01_rate = rate;
+
+	return ret;
+}
+
+static unsigned long rk3588_b0pll_get_rate(void)
+{
+	unsigned int m, p, s, k;
+	uint64_t rate64 = 24000000, postdiv;
+	int mode;
+
+	mode = (mmio_read_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(0)) >> 6) &
+	       0x3;
+
+	if (mode == 0)
+		return rate64;
+
+	m = (mmio_read_32(BIGCORE0CRU_BASE + CRU_PLL_CON(0)) >>
+		 CRU_PLLCON0_M_SHIFT) &
+		CRU_PLLCON0_M_MASK;
+	p = (mmio_read_32(BIGCORE0CRU_BASE + CRU_PLL_CON(1)) >>
+		    CRU_PLLCON1_P_SHIFT) &
+		   CRU_PLLCON1_P_MASK;
+	s = (mmio_read_32(BIGCORE0CRU_BASE + CRU_PLL_CON(1)) >>
+		  CRU_PLLCON1_S_SHIFT) &
+		 CRU_PLLCON1_S_MASK;
+	k = (mmio_read_32(BIGCORE0CRU_BASE + CRU_PLL_CON(2)) >>
+		    CRU_PLLCON2_K_SHIFT) &
+		   CRU_PLLCON2_K_MASK;
+
+	rate64 *= m;
+	rate64 = rate64 / p;
+
+	if (k != 0) {
+		/* fractional mode */
+		uint64_t frac_rate64 = 24000000ULL * k;
+
+		postdiv = p * 65535;
+		frac_rate64 = frac_rate64 / postdiv;
+		rate64 += frac_rate64;
+	}
+	rate64 = rate64 >> s;
+
+	return (unsigned long)rate64;
+}
+
+static unsigned long clk_scmi_cpub01_get_rate(rk_scmi_clock_t *clock)
+{
+	int value, src, div;
+
+	value = mmio_read_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(0));
+	src = (value & 0x6000) >> 13;
+	if (src == 2) {
+		return sys_clk_info.cpub01_rate;
+	} else {
+		src = (value & 0x00c0) >> 6;
+		div = (value & 0x1f00) >> 8;
+		switch (src) {
+		case 0:
+			return 24000000;
+		case 1:
+			/* Return the cached rate to match what was set */
+			if (sys_clk_info.cpub01_rate)
+				return sys_clk_info.cpub01_rate;
+			else
+				return GPLL_RATE / (div + 1);
+		case 2:
+			return rk3588_b0pll_get_rate();
+		default:
+			return 0;
+		}
+	}
+}
+
+static int clk_scmi_cpub01_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static void clk_scmi_b1pll_disable(void)
+{
+	static bool is_b1pll_disabled;
+
+	if (is_b1pll_disabled)
+		return;
+
+	/* set coreb23 mux gpll */
+	mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(0),
+		      CPUB01_CLK_PATH_NOR_GPLL);
+	/* pll enter slow mode */
+	mmio_write_32(BIGCORE1CRU_BASE + CRU_MODE_CON0, CPU_PLL_PATH_SLOWMODE);
+	/* set pll power down */
+	mmio_write_32(BIGCORE1CRU_BASE + CRU_PLL_CON(9), CRU_PLL_POWER_DOWN);
+
+	is_b1pll_disabled = true;
+}
+
+static int clk_cpub23_set_rate(unsigned long rate, enum pll_type_sel type)
+{
+	struct pvtpll_table *pvtpll;
+	int div;
+
+	if (rate == 0)
+		return SCMI_INVALID_PARAMETERS;
+
+	pvtpll = rkclk_get_pvtpll_config(sys_clk_info.cpub23_table,
+					 sys_clk_info.cpub23_rate_count, rate);
+	if (pvtpll == NULL)
+		return SCMI_INVALID_PARAMETERS;
+
+	/* set b1pll */
+	if (PVTPLL_NEED(type, pvtpll->length)) {
+		/* set clock gating interval */
+		mmio_write_32(BIGCORE1GRF_BASE + RK3588_CPUB_PVTPLL_CON2,
+			      0x00040000);
+		/* set ring sel */
+		mmio_write_32(BIGCORE1GRF_BASE + RK3588_CPUB_PVTPLL_CON0_L,
+			      0x07000000 | (pvtpll->ring_sel << 8));
+		/* set length */
+		mmio_write_32(BIGCORE1GRF_BASE + RK3588_CPUB_PVTPLL_CON0_H,
+			      0x003f0000 | pvtpll->length);
+		/* set cal cnt = 24, T = 1us */
+		mmio_write_32(BIGCORE1GRF_BASE + RK3588_CPUB_PVTPLL_CON1,
+			      0x18);
+		/* enable pvtpll */
+		mmio_write_32(BIGCORE1GRF_BASE + RK3588_CPUB_PVTPLL_CON0_L,
+			      0x00020002);
+		/* start monitor */
+		mmio_write_32(BIGCORE1GRF_BASE + RK3588_CPUB_PVTPLL_CON0_L,
+			      0x00010001);
+		/* set core mux pvtpll */
+		mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(2),
+			      CPUB01_PVTPLL_PATH_PVTPLL);
+		mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(0),
+			      CPUB01_CLK_PATH_B0_PVTPLL);
+		mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(1),
+			      CPUB01_CLK_PATH_B1_PVTPLL);
+		goto out;
+	}
+
+	/* set clk coreb23 div */
+	div = DIV_ROUND_UP(GPLL_RATE, rate) - 1;
+	mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(0),
+		      CLKDIV_5BITS_SHF(div, 8));
+	mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(1),
+		      CLKDIV_5BITS_SHF(div, 0));
+	/* set coreb23 mux gpll */
+	mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(0),
+		      CPUB01_CLK_PATH_NOR_GPLL);
+	mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(0),
+		      CPUB01_CLK_PATH_B0PLL);
+	mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(1),
+		      CPUB01_CLK_PATH_B1PLL);
+
+out:
+	clk_scmi_b1pll_disable();
+
+	return 0;
+}
+
+static int clk_scmi_cpub23_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	int ret;
+
+	if (rate == 0)
+		return SCMI_INVALID_PARAMETERS;
+
+	ret = clk_cpub23_set_rate(rate, PLL_SEL_AUTO);
+	if (ret == 0)
+		sys_clk_info.cpub23_rate = rate;
+
+	return ret;
+}
+
+static unsigned long rk3588_b1pll_get_rate(void)
+{
+	unsigned int m, p, s, k;
+	uint64_t rate64 = 24000000, postdiv;
+	int mode;
+
+	mode = (mmio_read_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(0)) >> 6) &
+	       0x3;
+
+	if (mode == 0)
+		return rate64;
+
+	m = (mmio_read_32(BIGCORE1CRU_BASE + CRU_PLL_CON(8)) >>
+		 CRU_PLLCON0_M_SHIFT) &
+		CRU_PLLCON0_M_MASK;
+	p = (mmio_read_32(BIGCORE1CRU_BASE + CRU_PLL_CON(9)) >>
+		    CRU_PLLCON1_P_SHIFT) &
+		   CRU_PLLCON1_P_MASK;
+	s = (mmio_read_32(BIGCORE1CRU_BASE + CRU_PLL_CON(9)) >>
+		  CRU_PLLCON1_S_SHIFT) &
+		 CRU_PLLCON1_S_MASK;
+	k = (mmio_read_32(BIGCORE1CRU_BASE + CRU_PLL_CON(10)) >>
+		    CRU_PLLCON2_K_SHIFT) &
+		   CRU_PLLCON2_K_MASK;
+
+	rate64 *= m;
+	rate64 = rate64 / p;
+
+	if (k != 0) {
+		/* fractional mode */
+		uint64_t frac_rate64 = 24000000ULL * k;
+
+		postdiv = p * 65535;
+		frac_rate64 = frac_rate64 / postdiv;
+		rate64 += frac_rate64;
+	}
+	rate64 = rate64 >> s;
+
+	return (unsigned long)rate64;
+}
+
+static unsigned long clk_scmi_cpub23_get_rate(rk_scmi_clock_t *clock)
+{
+	int value, src, div;
+
+	value = mmio_read_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(0));
+	src = (value & 0x6000) >> 13;
+	if (src == 2) {
+		return sys_clk_info.cpub23_rate;
+	} else {
+		src = (value & 0x00c0) >> 6;
+		div = (value & 0x1f00) >> 8;
+		switch (src) {
+		case 0:
+			return 24000000;
+		case 1:
+			/* Return the cached rate to match what was set */
+			if (sys_clk_info.cpub23_rate)
+				return sys_clk_info.cpub23_rate;
+			else
+				return GPLL_RATE / (div + 1);
+		case 2:
+			return rk3588_b1pll_get_rate();
+		default:
+			return 0;
+		}
+	}
+}
+
+static int clk_scmi_cpub23_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static unsigned long clk_scmi_dsu_get_rate(rk_scmi_clock_t *clock)
+{
+	int src, div;
+
+	src = mmio_read_32(DSUCRU_BASE + CRU_CLKSEL_CON(1)) & 0x1;
+	if (src != 0) {
+		return sys_clk_info.dsu_rate;
+	} else {
+		src = mmio_read_32(DSUCRU_BASE + CRU_CLKSEL_CON(0)) & 0x3000;
+		src = src >> 12;
+		div = mmio_read_32(DSUCRU_BASE + CRU_CLKSEL_CON(0)) & 0xf80;
+		div = div >> 7;
+		switch (src) {
+		case 0:
+			return rk3588_b0pll_get_rate() / (div + 1);
+		case 1:
+			return rk3588_b1pll_get_rate() / (div + 1);
+		case 2:
+			return rk3588_lpll_get_rate() / (div + 1);
+		case 3:
+			return GPLL_RATE / (div + 1);
+		default:
+			return 0;
+		}
+	}
+}
+
+static void clk_scmi_lpll_disable(void)
+{
+	static bool is_lpll_disabled;
+
+	if (is_lpll_disabled)
+		return;
+
+	/* set corel mux gpll */
+	mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(5),
+		      CPUL_CLK_PATH_NOR_GPLL);
+	/* set dsu mux gpll */
+	mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(0),
+		      SCLK_DSU_PATH_NOR_GPLL);
+	/* pll enter slow mode */
+	mmio_write_32(DSUCRU_BASE + CRU_MODE_CON0, CPU_PLL_PATH_SLOWMODE);
+	/* set pll power down */
+	mmio_write_32(DSUCRU_BASE + CRU_PLL_CON(17), CRU_PLL_POWER_DOWN);
+
+	is_lpll_disabled = true;
+}
+
+static int clk_dsu_set_rate(unsigned long rate, enum pll_type_sel type)
+{
+	struct pvtpll_table *pvtpll;
+	int div;
+
+	if (rate == 0)
+		return SCMI_INVALID_PARAMETERS;
+
+	pvtpll = rkclk_get_pvtpll_config(sys_clk_info.cpul_table,
+					 sys_clk_info.cpul_rate_count, rate);
+	if (pvtpll == NULL)
+		return SCMI_INVALID_PARAMETERS;
+
+	/* set pvtpll */
+	if (PVTPLL_NEED(type, pvtpll->length)) {
+		/* set clock gating interval */
+		mmio_write_32(DSUGRF_BASE + RK3588_DSU_PVTPLL_CON2,
+			      0x00040000);
+		/* set ring sel */
+		mmio_write_32(DSUGRF_BASE + RK3588_DSU_PVTPLL_CON0_L,
+			      0x07000000 | (pvtpll->ring_sel << 8));
+		/* set length */
+		mmio_write_32(DSUGRF_BASE + RK3588_DSU_PVTPLL_CON0_H,
+			      0x003f0000 | pvtpll->length);
+		/* set cal cnt = 24, T = 1us */
+		mmio_write_32(DSUGRF_BASE + RK3588_DSU_PVTPLL_CON1,
+			      0x18);
+		/* enable pvtpll */
+		mmio_write_32(DSUGRF_BASE + RK3588_DSU_PVTPLL_CON0_L,
+			      0x00020002);
+		/* start monitor */
+		mmio_write_32(DSUGRF_BASE + RK3588_DSU_PVTPLL_CON0_L,
+			      0x00010001);
+		/* set dsu mux pvtpll */
+		mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(7),
+			      DSU_PVTPLL_PATH_PVTPLL);
+		mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(1),
+			      SCLK_DSU_PATH_PVTPLL);
+		goto out;
+	}
+	/* set dsu div */
+	div = DIV_ROUND_UP(GPLL_RATE, rate) - 1;
+	mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(0),
+		      CLKDIV_5BITS_SHF(div, 7));
+	/* set dsu mux gpll */
+	mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(0),
+		      SCLK_DSU_PATH_NOR_GPLL);
+	mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(1),
+		      SCLK_DSU_PATH_NOR_PLL);
+
+out:
+	clk_scmi_lpll_disable();
+
+	return 0;
+}
+
+static int clk_scmi_dsu_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	int ret;
+
+	if (rate == 0)
+		return SCMI_INVALID_PARAMETERS;
+
+	ret = clk_dsu_set_rate(rate, PLL_SEL_AUTO);
+
+	if (ret == 0)
+		sys_clk_info.dsu_rate = rate;
+	return ret;
+}
+
+static int clk_scmi_dsu_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static unsigned long clk_scmi_gpu_get_rate(rk_scmi_clock_t *clock)
+{
+	int div, src;
+
+	if ((mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(158)) & 0x4000) != 0) {
+		return sys_clk_info.gpu_rate;
+	} else {
+		div = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(158)) & 0x1f;
+		src = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(158)) & 0x00e0;
+		src = src >> 5;
+		switch (src) {
+		case 0:
+			/* Return the cached rate to match what was set */
+			if (sys_clk_info.gpu_rate)
+				return sys_clk_info.gpu_rate;
+			else
+				return GPLL_RATE / (div + 1);
+		case 1:
+			return CPLL_RATE / (div + 1);
+		case 2:
+			return AUPLL_RATE / (div + 1);
+		case 3:
+			return NPLL_RATE / (div + 1);
+		case 4:
+			return SPLL_RATE / (div + 1);
+		default:
+			return 0;
+		}
+	}
+}
+
+static int clk_gpu_set_rate(unsigned long rate, enum pll_type_sel type)
+{
+	struct pvtpll_table *pvtpll;
+	int div;
+
+	pvtpll = rkclk_get_pvtpll_config(sys_clk_info.gpu_table,
+					 sys_clk_info.gpu_rate_count, rate);
+	if (pvtpll == NULL)
+		return SCMI_INVALID_PARAMETERS;
+
+	if (PVTPLL_NEED(type, pvtpll->length)) {
+		/* set clock gating interval */
+		mmio_write_32(GPUGRF_BASE + RK3588_GPU_PVTPLL_CON2,
+			      0x00040000);
+		/* set ring sel */
+		mmio_write_32(GPUGRF_BASE + RK3588_GPU_PVTPLL_CON0_L,
+			      0x07000000 | (pvtpll->ring_sel << 8));
+		/* set length */
+		mmio_write_32(GPUGRF_BASE + RK3588_GPU_PVTPLL_CON0_H,
+			      0x003f0000 | pvtpll->length);
+		/* set cal cnt = 24, T = 1us */
+		mmio_write_32(GPUGRF_BASE + RK3588_GPU_PVTPLL_CON1,
+			      0x18);
+		/* enable pvtpll */
+		mmio_write_32(GPUGRF_BASE + RK3588_GPU_PVTPLL_CON0_L,
+			      0x00020002);
+		/* start monitor */
+		mmio_write_32(GPUGRF_BASE + RK3588_GPU_PVTPLL_CON0_L,
+			      0x00010001);
+		/* set gpu mux pvtpll */
+		mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(158),
+			      GPU_CLK_PATH_PVTPLL);
+		return 0;
+	}
+
+	/* set gpu div */
+	div = DIV_ROUND_UP(GPLL_RATE, rate);
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(158),
+		      CLKDIV_5BITS_SHF(div - 1, 0));
+	/* set gpu mux gpll */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(158),
+		      GPU_CLK_PATH_NOR_GPLL);
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(158),
+		      GPU_CLK_PATH_NOR_PLL);
+
+	return 0;
+}
+
+static int clk_scmi_gpu_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	int ret;
+
+	if (rate == 0)
+		return SCMI_INVALID_PARAMETERS;
+
+	ret = clk_gpu_set_rate(rate, PLL_SEL_AUTO);
+	if (ret == 0)
+		sys_clk_info.gpu_rate = rate;
+
+	return ret;
+}
+
+static int clk_scmi_gpu_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static unsigned long clk_scmi_npu_get_rate(rk_scmi_clock_t *clock)
+{
+	int div, src;
+
+	if ((mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(74)) & 0x1) != 0) {
+		return sys_clk_info.npu_rate;
+	} else {
+		div = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(73)) & 0x007c;
+		div = div >> 2;
+		src = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(73)) & 0x0380;
+		src = src >> 7;
+		switch (src) {
+		case 0:
+			/* Return the cached rate to match what was set */
+			if (sys_clk_info.npu_rate != 0)
+				return sys_clk_info.npu_rate;
+			else
+				return GPLL_RATE / (div + 1);
+		case 1:
+			return CPLL_RATE / (div + 1);
+		case 2:
+			return AUPLL_RATE / (div + 1);
+		case 3:
+			return NPLL_RATE / (div + 1);
+		case 4:
+			return SPLL_RATE / (div + 1);
+		default:
+			return 0;
+		}
+	}
+}
+
+static int clk_npu_set_rate(unsigned long rate, enum pll_type_sel type)
+{
+	struct pvtpll_table *pvtpll;
+	int div;
+
+	pvtpll = rkclk_get_pvtpll_config(sys_clk_info.npu_table,
+					 sys_clk_info.npu_rate_count, rate);
+	if (pvtpll == NULL)
+		return SCMI_INVALID_PARAMETERS;
+
+	if (PVTPLL_NEED(type, pvtpll->length)) {
+		/* set clock gating interval */
+		mmio_write_32(NPUGRF_BASE + RK3588_NPU_PVTPLL_CON2,
+			      0x00040000);
+		/* set ring sel */
+		mmio_write_32(NPUGRF_BASE + RK3588_NPU_PVTPLL_CON0_L,
+			      0x07000000 | (pvtpll->ring_sel << 8));
+		/* set length */
+		mmio_write_32(NPUGRF_BASE + RK3588_NPU_PVTPLL_CON0_H,
+			      0x003f0000 | pvtpll->length);
+		/* set cal cnt = 24, T = 1us */
+		mmio_write_32(NPUGRF_BASE + RK3588_NPU_PVTPLL_CON1,
+			      0x18);
+		/* enable pvtpll */
+		mmio_write_32(NPUGRF_BASE + RK3588_NPU_PVTPLL_CON0_L,
+			      0x00020002);
+		/* start monitor */
+		mmio_write_32(NPUGRF_BASE + RK3588_NPU_PVTPLL_CON0_L,
+			      0x00010001);
+		/* set npu mux pvtpll */
+		mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(74),
+			      NPU_CLK_PATH_PVTPLL);
+		return 0;
+	}
+
+	/* set npu div */
+	div = DIV_ROUND_UP(GPLL_RATE, rate);
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(73),
+		      CLKDIV_5BITS_SHF(div - 1, 2));
+	/* set npu mux gpll */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(73),
+		      NPU_CLK_PATH_NOR_GPLL);
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(74),
+		      NPU_CLK_PATH_NOR_PLL);
+
+	return 0;
+}
+
+static int clk_scmi_npu_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	int ret;
+
+	if (rate == 0)
+		return SCMI_INVALID_PARAMETERS;
+
+	ret = clk_npu_set_rate(rate, PLL_SEL_AUTO);
+	if (ret == 0)
+		sys_clk_info.npu_rate = rate;
+
+	return ret;
+}
+
+static int clk_scmi_npu_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static unsigned long clk_scmi_sbus_get_rate(rk_scmi_clock_t *clock)
+{
+	int div;
+
+	if ((mmio_read_32(BUSSCRU_BASE + CRU_CLKSEL_CON(0)) & 0x0800) != 0) {
+		div = mmio_read_32(BUSSCRU_BASE + CRU_CLKSEL_CON(0));
+		div = (div & 0x03e0) >> 5;
+		return SPLL_RATE / (div + 1);
+	} else {
+		return OSC_HZ;
+	}
+}
+
+static int clk_scmi_sbus_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	int div;
+
+	if (rate == OSC_HZ) {
+		mmio_write_32(BUSSCRU_BASE + CRU_CLKSEL_CON(0),
+			      WMSK_BIT(11));
+		return 0;
+	}
+
+	div = DIV_ROUND_UP(SPLL_RATE, rate);
+	mmio_write_32(BUSSCRU_BASE + CRU_CLKSEL_CON(0),
+		      CLKDIV_5BITS_SHF(div - 1, 5));
+	mmio_write_32(BUSSCRU_BASE + CRU_CLKSEL_CON(0),
+		      BIT_WITH_WMSK(11) | WMSK_BIT(10));
+	return 0;
+}
+
+static int clk_scmi_sbus_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static unsigned long clk_scmi_pclk_sbus_get_rate(rk_scmi_clock_t *clock)
+{
+	int div;
+
+	div = mmio_read_32(BUSSCRU_BASE + CRU_CLKSEL_CON(0));
+	div = div & 0x001f;
+	return SPLL_RATE / (div + 1);
+}
+
+static int clk_scmi_pclk_sbus_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	int div;
+
+	div = DIV_ROUND_UP(SPLL_RATE, rate);
+	mmio_write_32(BUSSCRU_BASE + CRU_CLKSEL_CON(0),
+		      CLKDIV_5BITS_SHF(div - 1, 0));
+	return 0;
+}
+
+static int clk_scmi_pclk_sbus_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static unsigned long clk_scmi_cclk_sdmmc_get_rate(rk_scmi_clock_t *clock)
+{
+	int div;
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(3)) & 0x3000;
+	src = src >> 12;
+	div = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(3)) & 0x0fc0;
+	div = div >> 6;
+	if (src == 1) {
+		return SPLL_RATE / (div + 1);
+	} else if (src == 2) {
+		return OSC_HZ / (div + 1);
+	} else {
+		return GPLL_RATE / (div + 1);
+	}
+}
+
+static int clk_scmi_cclk_sdmmc_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	int div;
+
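+	/*
+	 * Prefer the parent that divides down to the requested rate exactly:
+	 * OSC first, then SPLL, otherwise fall back to GPLL.
+	 */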
+	if ((OSC_HZ % rate) == 0) {
+		div = DIV_ROUND_UP(OSC_HZ, rate);
+		mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(3),
+			      CLKDIV_6BITS_SHF(div - 1, 6) |
+			      BITS_WITH_WMASK(2U, 0x3U, 12));
+	} else if ((SPLL_RATE % rate) == 0) {
+		div = DIV_ROUND_UP(SPLL_RATE, rate);
+		mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(3),
+			      CLKDIV_6BITS_SHF(div - 1, 6) |
+			      BITS_WITH_WMASK(1U, 0x3U, 12));
+	} else {
+		div = DIV_ROUND_UP(GPLL_RATE, rate);
+		mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(3),
+			      CLKDIV_6BITS_SHF(div - 1, 6) |
+			      BITS_WITH_WMASK(0U, 0x3U, 12));
+	}
+
+	return 0;
+}
+
+static int clk_scmi_cclk_sdmmc_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(3),
+		      BITS_WITH_WMASK(!status, 0x1U, 4));
+	return 0;
+}
+
+static unsigned long clk_scmi_dclk_sdmmc_get_rate(rk_scmi_clock_t *clock)
+{
+	int div;
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(3)) & 0x0020;
+	div = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(3)) & 0x001f;
+	if (src != 0) {
+		return SPLL_RATE / (div + 1);
+	} else {
+		return GPLL_RATE / (div + 1);
+	}
+}
+
+static int clk_scmi_dclk_sdmmc_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	int div;
+
+	if ((SPLL_RATE % rate) == 0) {
+		div = DIV_ROUND_UP(SPLL_RATE, rate);
+		mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(3),
+			      CLKDIV_5BITS_SHF(div - 1, 0) |
+			      BITS_WITH_WMASK(1U, 0x1U, 5));
+	} else {
+		div = DIV_ROUND_UP(GPLL_RATE, rate);
+		mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(3),
+			      CLKDIV_5BITS_SHF(div - 1, 0) |
+			      BITS_WITH_WMASK(0U, 0x1U, 5));
+	}
+	return 0;
+}
+
+static int clk_scmi_dclk_sdmmc_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(3),
+		      BITS_WITH_WMASK(!status, 0x1U, 1));
+	return 0;
+}
+
+static unsigned long clk_scmi_aclk_secure_ns_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(1)) & 0x0003;
+	switch (src) {
+	case 0:
+		return 350 * MHz;
+	case 1:
+		return 200 * MHz;
+	case 2:
+		return 100 * MHz;
+	case 3:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_aclk_secure_ns_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 350 * MHz)
+		src = 0;
+	else if (rate >= 200 * MHz)
+		src = 1;
+	else if (rate >= 100 * MHz)
+		src = 2;
+	else
+		src = 3;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(1),
+		      BITS_WITH_WMASK(src, 0x3U, 0));
+
+	return 0;
+}
+
+static int clk_scmi_aclk_secure_ns_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static unsigned long clk_scmi_hclk_secure_ns_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(1)) & 0x000c;
+	src = src >> 2;
+	switch (src) {
+	case 0:
+		return 150 * MHz;
+	case 1:
+		return 100 * MHz;
+	case 2:
+		return 50 * MHz;
+	case 3:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_hclk_secure_ns_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 150 * MHz)
+		src = 0;
+	else if (rate >= 100 * MHz)
+		src = 1;
+	else if (rate >= 50 * MHz)
+		src = 2;
+	else
+		src = 3;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(1),
+		      BITS_WITH_WMASK(src, 0x3U, 2));
+	return 0;
+}
+
+static int clk_scmi_hclk_secure_ns_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static unsigned long clk_scmi_tclk_wdt_get_rate(rk_scmi_clock_t *clock)
+{
+	return OSC_HZ;
+}
+
+static int clk_scmi_tclk_wdt_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(2),
+		      BITS_WITH_WMASK(!status, 0x1U, 0));
+	return 0;
+}
+
+static unsigned long clk_scmi_keyladder_core_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(2)) & 0x00c0;
+	src = src >> 6;
+	switch (src) {
+	case 0:
+		return 350 * MHz;
+	case 1:
+		return 233 * MHz;
+	case 2:
+		return 116 * MHz;
+	case 3:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_keyladder_core_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 350 * MHz)
+		src = 0;
+	else if (rate >= 233 * MHz)
+		src = 1;
+	else if (rate >= 116 * MHz)
+		src = 2;
+	else
+		src = 3;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(2),
+		      BITS_WITH_WMASK(src, 0x3U, 6));
+	return 0;
+}
+
+static int clk_scmi_keyladder_core_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 9));
+	return 0;
+}
+
+static unsigned long clk_scmi_keyladder_rng_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(2)) & 0x0300;
+	src = src >> 8;
+	switch (src) {
+	case 0:
+		return 175 * MHz;
+	case 1:
+		return 116 * MHz;
+	case 2:
+		return 58 * MHz;
+	case 3:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_keyladder_rng_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 175 * MHz)
+		src = 0;
+	else if (rate >= 116 * MHz)
+		src = 1;
+	else if (rate >= 58 * MHz)
+		src = 2;
+	else
+		src = 3;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(2),
+		      BITS_WITH_WMASK(src, 0x3U, 8));
+	return 0;
+}
+
+static int clk_scmi_keyladder_rng_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 10));
+	return 0;
+}
+
+static unsigned long clk_scmi_aclk_secure_s_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(1)) & 0x0030;
+	src = src >> 4;
+	switch (src) {
+	case 0:
+		return 350 * MHz;
+	case 1:
+		return 233 * MHz;
+	case 2:
+		return 116 * MHz;
+	case 3:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_aclk_secure_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 350 * MHz)
+		src = 0;
+	else if (rate >= 233 * MHz)
+		src = 1;
+	else if (rate >= 116 * MHz)
+		src = 2;
+	else
+		src = 3;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(1),
+		      BITS_WITH_WMASK(src, 0x3U, 4));
+	return 0;
+}
+
+static int clk_scmi_aclk_secure_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static unsigned long clk_scmi_hclk_secure_s_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(1)) & 0x00c0;
+	src = src >> 6;
+	switch (src) {
+	case 0:
+		return 175 * MHz;
+	case 1:
+		return 116 * MHz;
+	case 2:
+		return 58 * MHz;
+	case 3:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_hclk_secure_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 175 * MHz)
+		src = 0;
+	else if (rate >= 116 * MHz)
+		src = 1;
+	else if (rate >= 58 * MHz)
+		src = 2;
+	else
+		src = 3;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(1),
+		      BITS_WITH_WMASK(src, 0x3U, 6));
+	return 0;
+}
+
+static int clk_scmi_hclk_secure_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static unsigned long clk_scmi_pclk_secure_s_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(1)) & 0x0300;
+	src = src >> 8;
+	switch (src) {
+	case 0:
+		return 116 * MHz;
+	case 1:
+		return 58 * MHz;
+	case 2:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_pclk_secure_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 116 * MHz)
+		src = 0;
+	else if (rate >= 58 * MHz)
+		src = 1;
+	else
+		src = 2;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(1),
+		      BITS_WITH_WMASK(src, 0x3U, 8));
+	return 0;
+}
+
+static int clk_scmi_pclk_secure_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static unsigned long clk_scmi_crypto_rng_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(1)) & 0xc000;
+	src = src >> 14;
+	switch (src) {
+	case 0:
+		return 175 * MHz;
+	case 1:
+		return 116 * MHz;
+	case 2:
+		return 58 * MHz;
+	case 3:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_crypto_rng_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 175 * MHz)
+		src = 0;
+	else if (rate >= 116 * MHz)
+		src = 1;
+	else if (rate >= 58 * MHz)
+		src = 2;
+	else
+		src = 3;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(1),
+		      BITS_WITH_WMASK(src, 0x3U, 14));
+	return 0;
+}
+
+static int clk_scmi_crypto_rng_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 1));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_crypto_core_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(1)) & 0x0c00;
+	src = src >> 10;
+	switch (src) {
+	case 0:
+		return 350 * MHz;
+	case 1:
+		return 233 * MHz;
+	case 2:
+		return 116 * MHz;
+	case 3:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_crypto_core_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 350 * MHz)
+		src = 0;
+	else if (rate >= 233 * MHz)
+		src = 1;
+	else if (rate >= 116 * MHz)
+		src = 2;
+	else
+		src = 3;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(1),
+		      BITS_WITH_WMASK(src, 0x3U, 10));
+	return 0;
+}
+
+static int clk_scmi_crypto_core_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(0),
+		      BITS_WITH_WMASK(!status, 0x1U, 15));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_crypto_pka_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(1)) & 0x3000;
+	src = src >> 12;
+	switch (src) {
+	case 0:
+		return 350 * MHz;
+	case 1:
+		return 233 * MHz;
+	case 2:
+		return 116 * MHz;
+	case 3:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_crypto_pka_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 350 * MHz)
+		src = 0;
+	else if (rate >= 233 * MHz)
+		src = 1;
+	else if (rate >= 116 * MHz)
+		src = 2;
+	else
+		src = 3;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(1),
+		      BITS_WITH_WMASK(src, 0x3U, 12));
+	return 0;
+}
+
+static int clk_scmi_crypto_pka_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 0));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_spll_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(BUSSCRU_BASE + CRU_MODE_CON0) & 0x3;
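+	/* mode 0: slow (OSC), 1: normal (702 MHz), 2: deep slow (32.768 kHz) */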
+	switch (src) {
+	case 0:
+		return OSC_HZ;
+	case 1:
+		return 702 * MHz;
+	case 2:
+		return 32768;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_spll_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 700 * MHz)
+		src = 1;
+	else
+		src = 0;
+
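+	/*
+	 * Switch SPLL to slow mode while the PLL is reprogrammed,
+	 * then apply the requested mode.
+	 */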
+	mmio_write_32(BUSSCRU_BASE + CRU_MODE_CON0,
+		      BITS_WITH_WMASK(0, 0x3U, 0));
+	mmio_write_32(BUSSCRU_BASE + CRU_PLL_CON(137),
+		      BITS_WITH_WMASK(2, 0x7U, 6));
+
+	mmio_write_32(BUSSCRU_BASE + CRU_MODE_CON0,
+		      BITS_WITH_WMASK(src, 0x3U, 0));
+	return 0;
+}
+
+static int clk_scmi_spll_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	return 0;
+}
+
+static unsigned long clk_scmi_hclk_sd_get_rate(rk_scmi_clock_t *clock)
+{
+	return clk_scmi_hclk_secure_ns_get_rate(clock);
+}
+
+static int clk_scmi_hclk_sd_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(3),
+		      BITS_WITH_WMASK(!status, 0x1U, 2));
+	return 0;
+}
+
+static unsigned long clk_scmi_crypto_rng_s_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(2)) & 0x0030;
+	src = src >> 4;
+	switch (src) {
+	case 0:
+		return 175 * MHz;
+	case 1:
+		return 116 * MHz;
+	case 2:
+		return 58 * MHz;
+	case 3:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_crypto_rng_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 175 * MHz)
+		src = 0;
+	else if (rate >= 116 * MHz)
+		src = 1;
+	else if (rate >= 58 * MHz)
+		src = 2;
+	else
+		src = 3;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(2),
+		      BITS_WITH_WMASK(src, 0x3U, 4));
+	return 0;
+}
+
+static int clk_scmi_crypto_rng_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 6));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_crypto_core_s_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(2)) & 0x3;
+	src = src >> 0;
+	switch (src) {
+	case 0:
+		return 350 * MHz;
+	case 1:
+		return 233 * MHz;
+	case 2:
+		return 116 * MHz;
+	case 3:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_crypto_core_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 350 * MHz)
+		src = 0;
+	else if (rate >= 233 * MHz)
+		src = 1;
+	else if (rate >= 116 * MHz)
+		src = 2;
+	else
+		src = 3;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(2),
+		      BITS_WITH_WMASK(src, 0x3U, 0));
+	return 0;
+}
+
+static int clk_scmi_crypto_core_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 4));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_crypto_pka_s_get_rate(rk_scmi_clock_t *clock)
+{
+	uint32_t src;
+
+	src = mmio_read_32(SCRU_BASE + CRU_CLKSEL_CON(2)) & 0x000c;
+	src = src >> 2;
+	switch (src) {
+	case 0:
+		return 350 * MHz;
+	case 1:
+		return 233 * MHz;
+	case 2:
+		return 116 * MHz;
+	case 3:
+		return OSC_HZ;
+	default:
+		return 0;
+	}
+}
+
+static int clk_scmi_crypto_pka_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	uint32_t src;
+
+	if (rate >= 350 * MHz)
+		src = 0;
+	else if (rate >= 233 * MHz)
+		src = 1;
+	else if (rate >= 116 * MHz)
+		src = 2;
+	else
+		src = 3;
+
+	mmio_write_32(SCRU_BASE + CRU_CLKSEL_CON(2),
+		      BITS_WITH_WMASK(src, 0x3U, 2));
+	return 0;
+}
+
+static int clk_scmi_crypto_pka_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 5));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_a_crypto_s_get_rate(rk_scmi_clock_t *clock)
+{
+	return clk_scmi_aclk_secure_s_get_rate(clock);
+}
+
+static int clk_scmi_a_crypto_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	return clk_scmi_aclk_secure_s_set_rate(clock, rate);
+}
+
+static int clk_scmi_a_crypto_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 7));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_h_crypto_s_get_rate(rk_scmi_clock_t *clock)
+{
+	return clk_scmi_hclk_secure_s_get_rate(clock);
+}
+
+static int clk_scmi_h_crypto_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	return clk_scmi_hclk_secure_s_set_rate(clock, rate);
+}
+
+static int clk_scmi_h_crypto_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 8));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_p_crypto_s_get_rate(rk_scmi_clock_t *clock)
+{
+	return clk_scmi_pclk_secure_s_get_rate(clock);
+}
+
+static int clk_scmi_p_crypto_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	return clk_scmi_pclk_secure_s_set_rate(clock, rate);
+}
+
+static int clk_scmi_p_crypto_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(2),
+		      BITS_WITH_WMASK(!status, 0x1U, 13));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_a_keylad_s_get_rate(rk_scmi_clock_t *clock)
+{
+	return clk_scmi_aclk_secure_s_get_rate(clock);
+}
+
+static int clk_scmi_a_keylad_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	return clk_scmi_aclk_secure_s_set_rate(clock, rate);
+}
+
+static int clk_scmi_a_keylad_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 11));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_h_keylad_s_get_rate(rk_scmi_clock_t *clock)
+{
+	return clk_scmi_hclk_secure_s_get_rate(clock);
+}
+
+static int clk_scmi_h_keylad_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	return clk_scmi_hclk_secure_s_set_rate(clock, rate);
+}
+
+static int clk_scmi_h_keylad_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 12));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_p_keylad_s_get_rate(rk_scmi_clock_t *clock)
+{
+	return clk_scmi_pclk_secure_s_get_rate(clock);
+}
+
+static int clk_scmi_p_keylad_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	return clk_scmi_pclk_secure_s_set_rate(clock, rate);
+}
+
+static int clk_scmi_p_keylad_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(2),
+		      BITS_WITH_WMASK(!status, 0x1U, 14));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_trng_s_get_rate(rk_scmi_clock_t *clock)
+{
+	return clk_scmi_hclk_secure_s_get_rate(clock);
+}
+
+static int clk_scmi_trng_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	return clk_scmi_hclk_secure_s_set_rate(clock, rate);
+}
+
+static int clk_scmi_trng_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(3),
+		      BITS_WITH_WMASK(!status, 0x1U, 6));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_h_trng_s_get_rate(rk_scmi_clock_t *clock)
+{
+	return clk_scmi_hclk_secure_s_get_rate(clock);
+}
+
+static int clk_scmi_h_trng_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	return clk_scmi_hclk_secure_s_set_rate(clock, rate);
+}
+
+static int clk_scmi_h_trng_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(2),
+		      BITS_WITH_WMASK(!status, 0x1U, 15));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_p_otpc_s_get_rate(rk_scmi_clock_t *clock)
+{
+	return clk_scmi_pclk_secure_s_get_rate(clock);
+}
+
+static int clk_scmi_p_otpc_s_set_rate(rk_scmi_clock_t *clock, unsigned long rate)
+{
+	return clk_scmi_pclk_secure_s_set_rate(clock, rate);
+}
+
+static int clk_scmi_p_otpc_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 13));
+
+	return 0;
+}
+
+static unsigned long clk_scmi_otpc_s_get_rate(rk_scmi_clock_t *clock)
+{
+	return OSC_HZ;
+}
+
+static int clk_scmi_otpc_s_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(SCRU_BASE + CRU_CLKGATE_CON(1),
+		      BITS_WITH_WMASK(!status, 0x1U, 14));
+	return 0;
+}
+
+static unsigned long clk_scmi_otp_phy_get_rate(rk_scmi_clock_t *clock)
+{
+	return OSC_HZ;
+}
+
+static int clk_scmi_otp_phy_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
+		      BITS_WITH_WMASK(!status, 0x1U, 13));
+	return 0;
+}
+
+static unsigned long clk_scmi_otpc_rd_get_rate(rk_scmi_clock_t *clock)
+{
+	return OSC_HZ;
+}
+
+static int clk_scmi_otpc_rd_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
+		      BITS_WITH_WMASK(!status, 0x1U, 12));
+	return 0;
+}
+
+static unsigned long clk_scmi_otpc_arb_get_rate(rk_scmi_clock_t *clock)
+{
+	return OSC_HZ;
+}
+
+static int clk_scmi_otpc_arb_set_status(rk_scmi_clock_t *clock, bool status)
+{
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
+		      BITS_WITH_WMASK(!status, 0x1U, 11));
+	return 0;
+}
+
+static const struct rk_clk_ops clk_scmi_cpul_ops = {
+	.get_rate = clk_scmi_cpul_get_rate,
+	.set_rate = clk_scmi_cpul_set_rate,
+	.set_status = clk_scmi_cpul_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_dsu_ops = {
+	.get_rate = clk_scmi_dsu_get_rate,
+	.set_rate = clk_scmi_dsu_set_rate,
+	.set_status = clk_scmi_dsu_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_cpub01_ops = {
+	.get_rate = clk_scmi_cpub01_get_rate,
+	.set_rate = clk_scmi_cpub01_set_rate,
+	.set_status = clk_scmi_cpub01_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_cpub23_ops = {
+	.get_rate = clk_scmi_cpub23_get_rate,
+	.set_rate = clk_scmi_cpub23_set_rate,
+	.set_status = clk_scmi_cpub23_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_gpu_ops = {
+	.get_rate = clk_scmi_gpu_get_rate,
+	.set_rate = clk_scmi_gpu_set_rate,
+	.set_status = clk_scmi_gpu_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_npu_ops = {
+	.get_rate = clk_scmi_npu_get_rate,
+	.set_rate = clk_scmi_npu_set_rate,
+	.set_status = clk_scmi_npu_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_sbus_ops = {
+	.get_rate = clk_scmi_sbus_get_rate,
+	.set_rate = clk_scmi_sbus_set_rate,
+	.set_status = clk_scmi_sbus_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_pclk_sbus_ops = {
+	.get_rate = clk_scmi_pclk_sbus_get_rate,
+	.set_rate = clk_scmi_pclk_sbus_set_rate,
+	.set_status = clk_scmi_pclk_sbus_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_cclk_sdmmc_ops = {
+	.get_rate = clk_scmi_cclk_sdmmc_get_rate,
+	.set_rate = clk_scmi_cclk_sdmmc_set_rate,
+	.set_status = clk_scmi_cclk_sdmmc_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_dclk_sdmmc_ops = {
+	.get_rate = clk_scmi_dclk_sdmmc_get_rate,
+	.set_rate = clk_scmi_dclk_sdmmc_set_rate,
+	.set_status = clk_scmi_dclk_sdmmc_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_aclk_secure_ns_ops = {
+	.get_rate = clk_scmi_aclk_secure_ns_get_rate,
+	.set_rate = clk_scmi_aclk_secure_ns_set_rate,
+	.set_status = clk_scmi_aclk_secure_ns_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_hclk_secure_ns_ops = {
+	.get_rate = clk_scmi_hclk_secure_ns_get_rate,
+	.set_rate = clk_scmi_hclk_secure_ns_set_rate,
+	.set_status = clk_scmi_hclk_secure_ns_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_tclk_wdt_ops = {
+	.get_rate = clk_scmi_tclk_wdt_get_rate,
+	.set_status = clk_scmi_tclk_wdt_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_keyladder_core_ops = {
+	.get_rate = clk_scmi_keyladder_core_get_rate,
+	.set_rate = clk_scmi_keyladder_core_set_rate,
+	.set_status = clk_scmi_keyladder_core_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_keyladder_rng_ops = {
+	.get_rate = clk_scmi_keyladder_rng_get_rate,
+	.set_rate = clk_scmi_keyladder_rng_set_rate,
+	.set_status = clk_scmi_keyladder_rng_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_aclk_secure_s_ops = {
+	.get_rate = clk_scmi_aclk_secure_s_get_rate,
+	.set_rate = clk_scmi_aclk_secure_s_set_rate,
+	.set_status = clk_scmi_aclk_secure_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_hclk_secure_s_ops = {
+	.get_rate = clk_scmi_hclk_secure_s_get_rate,
+	.set_rate = clk_scmi_hclk_secure_s_set_rate,
+	.set_status = clk_scmi_hclk_secure_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_pclk_secure_s_ops = {
+	.get_rate = clk_scmi_pclk_secure_s_get_rate,
+	.set_rate = clk_scmi_pclk_secure_s_set_rate,
+	.set_status = clk_scmi_pclk_secure_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_crypto_rng_ops = {
+	.get_rate = clk_scmi_crypto_rng_get_rate,
+	.set_rate = clk_scmi_crypto_rng_set_rate,
+	.set_status = clk_scmi_crypto_rng_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_crypto_core_ops = {
+	.get_rate = clk_scmi_crypto_core_get_rate,
+	.set_rate = clk_scmi_crypto_core_set_rate,
+	.set_status = clk_scmi_crypto_core_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_crypto_pka_ops = {
+	.get_rate = clk_scmi_crypto_pka_get_rate,
+	.set_rate = clk_scmi_crypto_pka_set_rate,
+	.set_status = clk_scmi_crypto_pka_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_spll_ops = {
+	.get_rate = clk_scmi_spll_get_rate,
+	.set_rate = clk_scmi_spll_set_rate,
+	.set_status = clk_scmi_spll_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_hclk_sd_ops = {
+	.get_rate = clk_scmi_hclk_sd_get_rate,
+	.set_status = clk_scmi_hclk_sd_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_crypto_rng_s_ops = {
+	.get_rate = clk_scmi_crypto_rng_s_get_rate,
+	.set_rate = clk_scmi_crypto_rng_s_set_rate,
+	.set_status = clk_scmi_crypto_rng_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_crypto_core_s_ops = {
+	.get_rate = clk_scmi_crypto_core_s_get_rate,
+	.set_rate = clk_scmi_crypto_core_s_set_rate,
+	.set_status = clk_scmi_crypto_core_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_crypto_pka_s_ops = {
+	.get_rate = clk_scmi_crypto_pka_s_get_rate,
+	.set_rate = clk_scmi_crypto_pka_s_set_rate,
+	.set_status = clk_scmi_crypto_pka_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_a_crypto_s_ops = {
+	.get_rate = clk_scmi_a_crypto_s_get_rate,
+	.set_rate = clk_scmi_a_crypto_s_set_rate,
+	.set_status = clk_scmi_a_crypto_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_h_crypto_s_ops = {
+	.get_rate = clk_scmi_h_crypto_s_get_rate,
+	.set_rate = clk_scmi_h_crypto_s_set_rate,
+	.set_status = clk_scmi_h_crypto_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_p_crypto_s_ops = {
+	.get_rate = clk_scmi_p_crypto_s_get_rate,
+	.set_rate = clk_scmi_p_crypto_s_set_rate,
+	.set_status = clk_scmi_p_crypto_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_a_keylad_s_ops = {
+	.get_rate = clk_scmi_a_keylad_s_get_rate,
+	.set_rate = clk_scmi_a_keylad_s_set_rate,
+	.set_status = clk_scmi_a_keylad_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_h_keylad_s_ops = {
+	.get_rate = clk_scmi_h_keylad_s_get_rate,
+	.set_rate = clk_scmi_h_keylad_s_set_rate,
+	.set_status = clk_scmi_h_keylad_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_p_keylad_s_ops = {
+	.get_rate = clk_scmi_p_keylad_s_get_rate,
+	.set_rate = clk_scmi_p_keylad_s_set_rate,
+	.set_status = clk_scmi_p_keylad_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_trng_s_ops = {
+	.get_rate = clk_scmi_trng_s_get_rate,
+	.set_rate = clk_scmi_trng_s_set_rate,
+	.set_status = clk_scmi_trng_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_h_trng_s_ops = {
+	.get_rate = clk_scmi_h_trng_s_get_rate,
+	.set_rate = clk_scmi_h_trng_s_set_rate,
+	.set_status = clk_scmi_h_trng_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_p_otpc_s_ops = {
+	.get_rate = clk_scmi_p_otpc_s_get_rate,
+	.set_rate = clk_scmi_p_otpc_s_set_rate,
+	.set_status = clk_scmi_p_otpc_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_otpc_s_ops = {
+	.get_rate = clk_scmi_otpc_s_get_rate,
+	.set_status = clk_scmi_otpc_s_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_otp_phy_ops = {
+	.get_rate = clk_scmi_otp_phy_get_rate,
+	.set_status = clk_scmi_otp_phy_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_otpc_rd_ops = {
+	.get_rate = clk_scmi_otpc_rd_get_rate,
+	.set_status = clk_scmi_otpc_rd_set_status,
+};
+
+static const struct rk_clk_ops clk_scmi_otpc_arb_ops = {
+	.get_rate = clk_scmi_otpc_arb_get_rate,
+	.set_status = clk_scmi_otpc_arb_set_status,
+};
+
+rk_scmi_clock_t clock_table[] = {
+	RK3588_SCMI_CLOCK(SCMI_CLK_CPUL, "scmi_clk_cpul", &clk_scmi_cpul_ops, rk3588_cpul_rates, ARRAY_SIZE(rk3588_cpul_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_CLK_DSU, "scmi_clk_dsu", &clk_scmi_dsu_ops, rk3588_cpul_rates, ARRAY_SIZE(rk3588_cpul_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_CLK_CPUB01, "scmi_clk_cpub01", &clk_scmi_cpub01_ops, rk3588_cpub_rates, ARRAY_SIZE(rk3588_cpub_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_CLK_CPUB23, "scmi_clk_cpub23", &clk_scmi_cpub23_ops, rk3588_cpub_rates, ARRAY_SIZE(rk3588_cpub_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_CLK_DDR, "scmi_clk_ddr", NULL, NULL, 0, false),
+	RK3588_SCMI_CLOCK(SCMI_CLK_GPU, "scmi_clk_gpu", &clk_scmi_gpu_ops, rk3588_gpu_rates, ARRAY_SIZE(rk3588_gpu_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_CLK_NPU, "scmi_clk_npu", &clk_scmi_npu_ops, rk3588_gpu_rates, ARRAY_SIZE(rk3588_gpu_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_CLK_SBUS, "scmi_clk_sbus", &clk_scmi_sbus_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_PCLK_SBUS, "scmi_pclk_sbus", &clk_scmi_pclk_sbus_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_CCLK_SD, "scmi_cclk_sd", &clk_scmi_cclk_sdmmc_ops, rk3588_sdmmc_rates, ARRAY_SIZE(rk3588_sdmmc_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_DCLK_SD, "scmi_dclk_sd", &clk_scmi_dclk_sdmmc_ops, rk3588_sdmmc_rates, ARRAY_SIZE(rk3588_sdmmc_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_ACLK_SECURE_NS, "scmi_aclk_se_ns", &clk_scmi_aclk_secure_ns_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_HCLK_SECURE_NS, "scmi_hclk_se_ns", &clk_scmi_hclk_secure_ns_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_TCLK_WDT, "scmi_tclk_wdt", &clk_scmi_tclk_wdt_ops, NULL, 0, false),
+	RK3588_SCMI_CLOCK(SCMI_KEYLADDER_CORE, "scmi_keylad_c", &clk_scmi_keyladder_core_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_KEYLADDER_RNG, "scmi_keylad_r", &clk_scmi_keyladder_rng_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_ACLK_SECURE_S, "scmi_aclk_se_s", &clk_scmi_aclk_secure_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_HCLK_SECURE_S, "scmi_hclk_se_s", &clk_scmi_hclk_secure_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_PCLK_SECURE_S, "scmi_pclk_se_s", &clk_scmi_pclk_secure_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_CRYPTO_RNG, "scmi_crypto_r", &clk_scmi_crypto_rng_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_CRYPTO_CORE, "scmi_crypto_c", &clk_scmi_crypto_core_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_CRYPTO_PKA, "scmi_crypto_p", &clk_scmi_crypto_pka_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_SPLL, "scmi_spll", &clk_scmi_spll_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), false),
+	RK3588_SCMI_CLOCK(SCMI_HCLK_SD, "scmi_hclk_sd", &clk_scmi_hclk_sd_ops, NULL, 0, false),
+	RK3588_SCMI_CLOCK(SCMI_CRYPTO_RNG_S, "scmi_crypto_r_s", &clk_scmi_crypto_rng_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_CRYPTO_CORE_S, "scmi_crypto_c_s", &clk_scmi_crypto_core_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_CRYPTO_PKA_S, "scmi_crypto_p_s", &clk_scmi_crypto_pka_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_A_CRYPTO_S, "scmi_a_crypto_s", &clk_scmi_a_crypto_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_H_CRYPTO_S, "scmi_h_crypto_s", &clk_scmi_h_crypto_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_P_CRYPTO_S, "scmi_p_crypto_s", &clk_scmi_p_crypto_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_A_KEYLADDER_S, "scmi_a_keylad_s", &clk_scmi_a_keylad_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_H_KEYLADDER_S, "scmi_h_keylad_s", &clk_scmi_h_keylad_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_P_KEYLADDER_S, "scmi_p_keylad_s", &clk_scmi_p_keylad_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_TRNG_S, "scmi_trng_s", &clk_scmi_trng_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_H_TRNG_S, "scmi_h_trng_s", &clk_scmi_h_trng_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_P_OTPC_S, "scmi_p_otpc_s", &clk_scmi_p_otpc_s_ops, rk3588_sbus_rates, ARRAY_SIZE(rk3588_sbus_rates), true),
+	RK3588_SCMI_CLOCK(SCMI_OTPC_S, "scmi_otpc_s", &clk_scmi_otpc_s_ops, NULL, 0, true),
+	RK3588_SCMI_CLOCK(SCMI_OTP_PHY, "scmi_otp_phy", &clk_scmi_otp_phy_ops, NULL, 0, false),
+	RK3588_SCMI_CLOCK(SCMI_OTPC_AUTO_RD, "scmi_otpc_rd", &clk_scmi_otpc_rd_ops, NULL, 0, false),
+	RK3588_SCMI_CLOCK(SCMI_OTPC_ARB, "scmi_otpc_arb", &clk_scmi_otpc_arb_ops, NULL, 0, false),
+};
+
+size_t rockchip_scmi_clock_count(unsigned int agent_id __unused)
+{
+	return ARRAY_SIZE(clock_table);
+}
+
+rk_scmi_clock_t *rockchip_scmi_get_clock(uint32_t agent_id __unused,
+					 uint32_t clock_id)
+{
+	rk_scmi_clock_t *table = NULL;
+
+	if (clock_id < ARRAY_SIZE(clock_table))
+		table = &clock_table[clock_id];
+
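+	/* Clocks flagged as secure are not exposed through this interface */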
+	if (table && !table->is_security)
+		return table;
+	else
+		return NULL;
+}
+
+void pvtplls_suspend(void)
+{
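+	/* Park the cpu/dsu clocks at 408 MHz on the normal PLL path */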
+	clk_cpul_set_rate(408000000, PLL_SEL_NOR);
+	clk_dsu_set_rate(408000000, PLL_SEL_NOR);
+	clk_cpub01_set_rate(408000000, PLL_SEL_NOR);
+	clk_cpub23_set_rate(408000000, PLL_SEL_NOR);
+}
+
+void pvtplls_resume(void)
+{
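+	/* Restore the saved rates with automatic PLL selection */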
+	clk_cpul_set_rate(sys_clk_info.cpul_rate, PLL_SEL_AUTO);
+	clk_dsu_set_rate(sys_clk_info.dsu_rate, PLL_SEL_AUTO);
+	clk_cpub01_set_rate(sys_clk_info.cpub01_rate, PLL_SEL_AUTO);
+	clk_cpub23_set_rate(sys_clk_info.cpub23_rate, PLL_SEL_AUTO);
+}
+
+void sys_reset_pvtplls_prepare(void)
+{
+	clk_gpu_set_rate(100000000, PLL_SEL_NOR);
+	clk_npu_set_rate(100000000, PLL_SEL_NOR);
+	clk_cpul_set_rate(408000000, PLL_SEL_NOR);
+	clk_cpub01_set_rate(408000000, PLL_SEL_NOR);
+	clk_cpub23_set_rate(408000000, PLL_SEL_NOR);
+	clk_dsu_set_rate(408000000, PLL_SEL_NOR);
+}
+
+void rockchip_clock_init(void)
+{
+	/* set gpll src div to 0 for cpul */
+	mmio_write_32(DSUCRU_BASE + CRU_CLKSEL_CON(5), CLKDIV_5BITS_SHF(0U, 9));
+	/* set gpll src div to 0 for cpub01 */
+	mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(0),
+		      CLKDIV_5BITS_SHF(0U, 1));
+	/* set gpll src div to 0 for cpub23 */
+	mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(0),
+		      CLKDIV_5BITS_SHF(0U, 1));
+
+	mmio_write_32(BIGCORE0CRU_BASE + CRU_CLKSEL_CON(2),
+		      CPUB_PCLK_PATH_50M);
+	mmio_write_32(BIGCORE1CRU_BASE + CRU_CLKSEL_CON(2),
+		      CPUB_PCLK_PATH_50M);
+
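+	/* set pclk_dsu_root divider and select GPLL as its parent */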
+	mmio_write_32(DSUCRU_BASE + DSUCRU_CLKSEL_CON(4),
+		      CLKDIV_5BITS_SHF(5U, 0));
+	mmio_write_32(DSUCRU_BASE + DSUCRU_CLKSEL_CON(4),
+		      BITS_WITH_WMASK(PCLK_DSU_ROOT_SEL_GPLL,
+				      PCLK_DSU_ROOT_SEL_MASK,
+				      PCLK_DSU_ROOT_SEL_SHIFT));
+
+	sys_clk_info.cpul_table = rk3588_cpul_pvtpll_table;
+	sys_clk_info.cpul_rate_count = ARRAY_SIZE(rk3588_cpul_pvtpll_table);
+	sys_clk_info.cpub01_table = rk3588_cpub0_pvtpll_table;
+	sys_clk_info.cpub01_rate_count = ARRAY_SIZE(rk3588_cpub0_pvtpll_table);
+	sys_clk_info.cpub23_table = rk3588_cpub1_pvtpll_table;
+	sys_clk_info.cpub23_rate_count = ARRAY_SIZE(rk3588_cpub1_pvtpll_table);
+	memcpy(sys_clk_info.cpub23_table, sys_clk_info.cpub01_table,
+	       sys_clk_info.cpub01_rate_count * sizeof(*sys_clk_info.cpub01_table));
+	sys_clk_info.gpu_table = rk3588_gpu_pvtpll_table;
+	sys_clk_info.gpu_rate_count = ARRAY_SIZE(rk3588_gpu_pvtpll_table);
+	sys_clk_info.npu_table = rk3588_npu_pvtpll_table;
+	sys_clk_info.npu_rate_count = ARRAY_SIZE(rk3588_npu_pvtpll_table);
+}
diff --git a/plat/rockchip/rk3588/drivers/scmi/rk3588_clk.h b/plat/rockchip/rk3588/drivers/scmi/rk3588_clk.h
new file mode 100644
index 0000000..66fddaa
--- /dev/null
+++ b/plat/rockchip/rk3588/drivers/scmi/rk3588_clk.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CLOCK_H__
+#define __CLOCK_H__
+
+/* scmi-clocks indices */
+
+#define SCMI_CLK_CPUL			0
+#define SCMI_CLK_DSU			1
+#define SCMI_CLK_CPUB01			2
+#define SCMI_CLK_CPUB23			3
+#define SCMI_CLK_DDR			4
+#define SCMI_CLK_GPU			5
+#define SCMI_CLK_NPU			6
+#define SCMI_CLK_SBUS			7
+#define SCMI_PCLK_SBUS			8
+#define SCMI_CCLK_SD			9
+#define SCMI_DCLK_SD			10
+#define SCMI_ACLK_SECURE_NS		11
+#define SCMI_HCLK_SECURE_NS		12
+#define SCMI_TCLK_WDT			13
+#define SCMI_KEYLADDER_CORE		14
+#define SCMI_KEYLADDER_RNG		15
+#define SCMI_ACLK_SECURE_S		16
+#define SCMI_HCLK_SECURE_S		17
+#define SCMI_PCLK_SECURE_S		18
+#define SCMI_CRYPTO_RNG			19
+#define SCMI_CRYPTO_CORE		20
+#define SCMI_CRYPTO_PKA			21
+#define SCMI_SPLL			22
+#define SCMI_HCLK_SD			23
+#define SCMI_CRYPTO_RNG_S		24
+#define SCMI_CRYPTO_CORE_S		25
+#define SCMI_CRYPTO_PKA_S		26
+#define SCMI_A_CRYPTO_S			27
+#define SCMI_H_CRYPTO_S			28
+#define SCMI_P_CRYPTO_S			29
+#define SCMI_A_KEYLADDER_S		30
+#define SCMI_H_KEYLADDER_S		31
+#define SCMI_P_KEYLADDER_S		32
+#define SCMI_TRNG_S			33
+#define SCMI_H_TRNG_S			34
+#define SCMI_P_OTPC_S			35
+#define SCMI_OTPC_S			36
+#define SCMI_OTP_PHY			37
+#define SCMI_OTPC_AUTO_RD		38
+#define SCMI_OTPC_ARB			39
+
+/******** DSUCRU **************************************/
+#define DSUCRU_CLKSEL_CON(n)		(0x0300 + (n) * 4)
+
+/********Name=DSUCRU_CLKSEL_CON04,Offset=0x310********/
+#define PCLK_DSU_ROOT_SEL_SHIFT		5
+#define PCLK_DSU_ROOT_SEL_MASK		0x3
+#define PCLK_DSU_ROOT_SEL_GPLL		0x3
+
+/********Name=SECURE_SOFTRST_CON00,Offset=0xA00********/
+#define SRST_A_SECURE_NS_BIU		10
+#define SRST_H_SECURE_NS_BIU		11
+#define SRST_A_SECURE_S_BIU		12
+#define SRST_H_SECURE_S_BIU		13
+#define SRST_P_SECURE_S_BIU		14
+#define SRST_CRYPTO_CORE		15
+/********Name=SECURE_SOFTRST_CON01,Offset=0xA04********/
+#define SRST_CRYPTO_PKA			16
+#define SRST_CRYPTO_RNG			17
+#define SRST_A_CRYPTO			18
+#define SRST_H_CRYPTO			19
+#define SRST_KEYLADDER_CORE		25
+#define SRST_KEYLADDER_RNG		26
+#define SRST_A_KEYLADDER		27
+#define SRST_H_KEYLADDER		28
+#define SRST_P_OTPC_S			29
+#define SRST_OTPC_S			30
+#define SRST_WDT_S			31
+/********Name=SECURE_SOFTRST_CON02,Offset=0xA08********/
+#define SRST_T_WDT_S			32
+#define SRST_H_BOOTROM			33
+#define SRST_A_DCF			34
+#define SRST_P_DCF			35
+#define SRST_H_BOOTROM_NS		37
+#define SRST_P_KEYLADDER		46
+#define SRST_H_TRNG_S			47
+/********Name=SECURE_SOFTRST_CON03,Offset=0xA0C********/
+#define SRST_H_TRNG_NS			48
+#define SRST_D_SDMMC_BUFFER		49
+#define SRST_H_SDMMC			50
+#define SRST_H_SDMMC_BUFFER		51
+#define SRST_SDMMC			52
+#define SRST_P_TRNG_CHK			53
+#define SRST_TRNG_S			54
+
+#define SRST_INVALID			55
+
+void pvtplls_suspend(void);
+void pvtplls_resume(void);
+
+void rockchip_clock_init(void);
+
+#endif
diff --git a/plat/rockchip/rk3588/drivers/scmi/rk3588_rstd.c b/plat/rockchip/rk3588/drivers/scmi/rk3588_rstd.c
new file mode 100644
index 0000000..50b99e7
--- /dev/null
+++ b/plat/rockchip/rk3588/drivers/scmi/rk3588_rstd.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <drivers/delay_timer.h>
+#include <drivers/scmi.h>
+#include <lib/mmio.h>
+#include <platform_def.h>
+
+#include <plat_private.h>
+#include "rk3588_clk.h"
+#include <scmi_rstd.h>
+#include <soc.h>
+
+#define DEFAULT_RESET_DOM_ATTRIBUTE	0
+
+#define RK3588_SCMI_RESET(_id, _name, _attribute, _ops)		\
+{								\
+	.id = _id,						\
+	.name = _name,						\
+	.attribute = _attribute,				\
+	.rstd_ops = _ops,					\
+}
+
+static int rk3588_reset_explicit(rk_scmi_rstd_t *reset_domain,
+				 bool assert_not_deassert)
+{
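+	/* Each SOFTRST_CON register holds 16 reset bits */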
+	int bank = reset_domain->id / 16;
+	int offset = reset_domain->id % 16;
+
+	mmio_write_32(SCRU_BASE + CRU_SOFTRST_CON(bank),
+		      BITS_WITH_WMASK(assert_not_deassert, 0x1U, offset));
+	return SCMI_SUCCESS;
+}
+
+static struct rk_scmi_rstd_ops rk3588_reset_domain_ops = {
+	.reset_explicit = rk3588_reset_explicit,
+};
+
+static rk_scmi_rstd_t rk3588_reset_domain_table[] = {
+	RK3588_SCMI_RESET(SRST_CRYPTO_CORE, "scmi_sr_cy_core", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_CRYPTO_PKA, "scmi_sr_cy_pka", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_CRYPTO_RNG, "scmi_sr_cy_rng", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_A_CRYPTO, "scmi_sr_a_cy", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_H_CRYPTO, "scmi_sr_h_cy", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_KEYLADDER_CORE, "scmi_sr_k_core", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_KEYLADDER_RNG, "scmi_sr_k_rng", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_P_OTPC_S, "scmi_sr_p_otp", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_OTPC_S, "scmi_sr_otp", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_WDT_S, "scmi_sr_wdt", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_T_WDT_S, "scmi_sr_t_wdt", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_H_BOOTROM, "scmi_sr_h_boot", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_P_KEYLADDER, "scmi_sr_p_ky", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_H_TRNG_S, "scmi_sr_h_trng", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_H_TRNG_NS, "scmi_sr_t_trng", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_D_SDMMC_BUFFER, "scmi_sr_d_sd", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_H_SDMMC, "scmi_sr_h_sd", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_H_SDMMC_BUFFER, "scmi_sr_h_sd_b", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_SDMMC, "scmi_sr_sd", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_P_TRNG_CHK, "scmi_sr_p_trng", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_TRNG_S, "scmi_sr_trng", DEFAULT_RESET_DOM_ATTRIBUTE, &rk3588_reset_domain_ops),
+	RK3588_SCMI_RESET(SRST_INVALID, "scmi_sr_invalid", DEFAULT_RESET_DOM_ATTRIBUTE, NULL),
+};
+
+static rk_scmi_rstd_t *
+rockchip_get_reset_domain_table(int id)
+{
+	rk_scmi_rstd_t *reset = rk3588_reset_domain_table;
+	int i = 0, cnt = ARRAY_SIZE(rk3588_reset_domain_table);
+
+	for (i = 0; i < cnt; i++) {
+		if (reset->id == id)
+			return &rk3588_reset_domain_table[i];
+		reset++;
+	}
+
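+	/* Fall back to the trailing SRST_INVALID entry for unknown ids */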
+	return &rk3588_reset_domain_table[cnt - 1];
+}
+
+rk_scmi_rstd_t *rockchip_scmi_get_rstd(unsigned int agent_id,
+				       unsigned int scmi_id)
+{
+	return rockchip_get_reset_domain_table(scmi_id);
+}
+
+size_t rockchip_scmi_rstd_count(unsigned int agent_id)
+{
+	return SRST_TRNG_S;
+}
+
diff --git a/plat/rockchip/rk3588/drivers/secure/secure.c b/plat/rockchip/rk3588/drivers/secure/secure.c
new file mode 100644
index 0000000..fc9f211
--- /dev/null
+++ b/plat/rockchip/rk3588/drivers/secure/secure.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <lib/mmio.h>
+
+#include <platform_def.h>
+
+#include <secure.h>
+#include <soc.h>
+
+static void secure_fw_master_init(void)
+{
+	uint32_t i;
+
+	/* ddr_mcu can access all ddr-regions */
+	mmio_write_32(FIREWALL_DDR_BASE + FIREWALL_DDR_MST(1), 0x0000ffff);
+	/* dcf/crypto_s can access all ddr-regions */
+	mmio_write_32(FIREWALL_DDR_BASE + FIREWALL_DDR_MST(14), 0x00000000);
+	/* dsu_mp_sec can access all ddr-regions.
+	 * The DSU accesses memory [f000_0000~ff00_0000] through MP in firewall_ddr.
+	 */
+	mmio_write_32(FIREWALL_DDR_BASE + FIREWALL_DDR_MST(36), 0xffff0000);
+
+	/* all other ns-masters can't access any ddr-region */
+	for (i = 0; i < FIREWALL_DDR_MST_CNT; i++) {
+		if (i == 1 || i == 14 || i == 36)
+			continue;
+
+		mmio_write_32(FIREWALL_DDR_BASE + FIREWALL_DDR_MST(i), 0xffffffff);
+	}
+
+	/* mcu_pmu can access all sram-regions */
+	mmio_write_32(FIREWALL_SYSMEM_BASE + FIREWALL_SYSMEM_MST(19), 0x000000ff);
+	/* dsu mp-sec can access all sram-regions */
+	mmio_write_32(FIREWALL_SYSMEM_BASE + FIREWALL_SYSMEM_MST(38), 0x000000ff);
+	/* nsp_dsu2main_sec can access all sram-regions */
+	mmio_write_32(FIREWALL_SYSMEM_BASE + FIREWALL_SYSMEM_MST(41), 0x00000000);
+
+	/* all other ns-masters can't access any sram-region */
+	for (i = 0; i < FIREWALL_SYSMEM_MST_CNT; i++) {
+		if (i == 19 || i == 38 || i == 41)
+			continue;
+
+		mmio_write_32(FIREWALL_SYSMEM_BASE + FIREWALL_SYSMEM_MST(i),
+			      0x00ff00ff);
+	}
+
+	/* dsu-ns can't access any ddr-region; dsu-s can access all ddr-regions */
+	mmio_write_32(FIREWALL_DSU_BASE + FIREWALL_DSU_MST(0), 0xffffffff);
+	mmio_write_32(FIREWALL_DSU_BASE + FIREWALL_DSU_MST(1), 0x00000000);
+	dsb();
+	isb();
+}
+
+/* unit: Mb */
+static void dsu_fw_rgn_config(uint64_t base_mb, uint64_t top_mb, int rgn_id)
+{
+	int i;
+
+	if (rgn_id >= FIREWALL_DSU_RGN_CNT || rgn_id < 0) {
+		ERROR("%s: region-id %d is invalid!\n", __func__, rgn_id);
+		panic();
+	}
+
+	mmio_write_32(FIREWALL_DSU_BASE + FIREWALL_DSU_RGN(rgn_id),
+		      RG_MAP_SECURE(top_mb, base_mb));
+
+	for (i = 0; i < DDR_CHN_CNT; i++)
+		mmio_setbits_32(FIREWALL_DSU_BASE + FIREWALL_DSU_CON(i),
+				BIT(rgn_id));
+}
+
+/* unit: Mb */
+static void ddr_fw_rgn_config(uint64_t base_mb, uint64_t top_mb, int rgn_id)
+{
+	if (rgn_id >= FIREWALL_DDR_RGN_CNT || rgn_id < 0) {
+		ERROR("%s: region-id %d is invalid!\n", __func__, rgn_id);
+		panic();
+	}
+
+	mmio_write_32(FIREWALL_DDR_BASE + FIREWALL_DDR_RGN(rgn_id),
+		      RG_MAP_SECURE(top_mb, base_mb));
+
+	/* enable region */
+	mmio_setbits_32(FIREWALL_DDR_BASE + FIREWALL_DDR_CON,
+			BIT(rgn_id));
+}
+
+/* Unit: Kb */
+static void sram_fw_rgn_config(uint64_t base_kb, uint64_t top_kb, int rgn_id)
+{
+	if (rgn_id >= FIREWALL_SYSMEM_RGN_CNT || rgn_id < 0) {
+		ERROR("%s: region-id %d is invalid!\n", __func__, rgn_id);
+		panic();
+	}
+
+	mmio_write_32(FIREWALL_SYSMEM_BASE + FIREWALL_SYSMEM_RGN(rgn_id),
+		      RG_MAP_SRAM_SECURE(top_kb, base_kb));
+
+	/* enable region */
+	mmio_setbits_32(FIREWALL_SYSMEM_BASE + FIREWALL_SYSMEM_CON, BIT(rgn_id));
+}
+
+static void secure_region_init(void)
+{
+	uint32_t i;
+
+	/* disable all region first except region0 */
+	mmio_clrbits_32(FIREWALL_DDR_BASE + FIREWALL_DDR_CON, 0xfffe);
+	for (i = 0; i < FIREWALL_DSU_CON_CNT; i++)
+		mmio_clrbits_32(FIREWALL_DSU_BASE + FIREWALL_DSU_CON(i), 0xfffe);
+	mmio_clrbits_32(FIREWALL_SYSMEM_BASE + FIREWALL_SYSMEM_CON, 0xfe);
+
+	secure_fw_master_init();
+
+	/* Use FW_DDR_RGN0_REG to configure the 0~1M space as secure */
+	dsu_fw_rgn_config(0, 1, 0);
+	ddr_fw_rgn_config(0, 1, 0);
+
+	/* Use FIREWALL_SYSMEM_RGN0 to configure the SRAM_ENTRY code (0~4k of sram) as secure */
+	sram_fw_rgn_config(0, 4, 0);
+	/* For 0xffff0000~0xffffffff, use FIREWALL_SYSMEM_RGN7 to configure
+	 * 960~1024k of sram as secure.
+	 */
+	sram_fw_rgn_config(960, 1024, 7);
+}
+
+void secure_timer_init(void)
+{
+	/* gpu's cntvalue comes from stimer1 channel_5 */
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_CONTROL_REG,
+		      TIMER_DIS);
+
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_LOAD_COUNT0, 0xffffffff);
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_LOAD_COUNT1, 0xffffffff);
+
+	/* auto reload & enable the timer */
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_CONTROL_REG,
+		      TIMER_EN | TIMER_FMODE);
+}
+
+void sgrf_init(void)
+{
+	uint32_t i;
+
+	secure_region_init();
+
+	/* config master ddr_mcu_prot|dcf_wr|dcf_rd as secure */
+	mmio_write_32(BUSSGRF_BASE + SGRF_SOC_CON(14), 0x001f0011);
+	mmio_write_32(BUSSGRF_BASE + SGRF_SOC_CON(15), 0xffffffff);
+	mmio_write_32(BUSSGRF_BASE + SGRF_SOC_CON(16), 0x03ff03ff);
+
+	/* config slave mailbox_mcu_ddr as secure */
+	mmio_write_32(BUSSGRF_BASE + SGRF_FIREWALL_CON(4), 0xffff2000);
+	/* config slave int256mux4_mcu_ddr|int256mux4_mcu_pmu as secure */
+	mmio_write_32(BUSSGRF_BASE + SGRF_FIREWALL_CON(5), 0xffff0060);
+	/* config slave ddrgrf*|dma2ddr|ddrphy*_cru|umctl* as secure */
+	mmio_write_32(BUSSGRF_BASE + SGRF_FIREWALL_CON(24), 0xffff0fbf);
+	/* config slave ddrphy*|ddr_stanby*|ddr_mcu_timer|ddr_mcu_wdt as secure */
+	mmio_write_32(BUSSGRF_BASE + SGRF_FIREWALL_CON(25), 0xffff03ff);
+
+	/* config all other slave as ns */
+	for (i = 0; i < SGRF_FIREWALL_CON_CNT; i++) {
+		if (i == 4 || i == 5 || i == 24 || i == 25)
+			continue;
+
+		mmio_write_32(BUSSGRF_BASE + SGRF_FIREWALL_CON(i), 0xffff0000);
+	}
+
+	/* config vad_hprot non-secure, pmu_mcu_hprot as secure */
+	mmio_write_32(PMU1SGRF_BASE + PMU1SGRF_SOC_CON(0), 0x00180010);
+	/* config pmu1, pmu0, pmu_sram as secure */
+	mmio_write_32(PMU1SGRF_BASE + PMU1SGRF_SOC_CON(1), 0xefbe6020);
+	/* config remap_pmu_mem, h_pmu_mem as secure */
+	mmio_write_32(PMU1SGRF_BASE + PMU1SGRF_SOC_CON(2), 0x01f900c0);
+
+	/* disable dp encryption */
+	mmio_write_32(BUSSGRF_BASE + SGRF_SOC_CON(13), 0x00180018);
+
+	/* select grf config for pcie ats */
+	mmio_write_32(BUSSGRF_BASE + SGRF_SOC_CON(17), 0x11111111);
+	mmio_write_32(BUSSGRF_BASE + SGRF_SOC_CON(18), 0x11111111);
+	mmio_write_32(BUSSGRF_BASE + SGRF_SOC_CON(19), 0x00110011);
+}
diff --git a/plat/rockchip/rk3588/drivers/secure/secure.h b/plat/rockchip/rk3588/drivers/secure/secure.h
new file mode 100644
index 0000000..d9c234f
--- /dev/null
+++ b/plat/rockchip/rk3588/drivers/secure/secure.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SECURE_H
+#define SECURE_H
+
+/* DSUSGRF */
+#define DSU_SGRF_SOC_CON(i)		((i) * 4)
+#define DSUSGRF_SOC_CON(i)		((i) * 4)
+#define DSUSGRF_SOC_CON_CNT		13
+#define DSUSGRF_DDR_HASH_CON(i)		(0x240 + (i) * 4)
+#define DSUSGRF_DDR_HASH_CON_CNT	8
+
+/* PMUSGRF */
+#define PMU1SGRF_SOC_CON(n)		((n) * 4)
+
+/* SGRF */
+#define SGRF_SOC_CON(i)			((i) * 4)
+#define SGRF_FIREWALL_CON(i)		(0x240 + (i) * 4)
+#define SGRF_FIREWALL_CON_CNT		32
+
+/* ddr firewall */
+#define FIREWALL_DDR_RGN(i)		((i) * 0x4)
+#define FIREWALL_DDR_RGN_CNT		16
+#define FIREWALL_DDR_MST(i)		(0x40 + (i) * 0x4)
+#define FIREWALL_DDR_MST_CNT		42
+#define FIREWALL_DDR_CON		0xf0
+
+#define FIREWALL_SYSMEM_RGN(i)		((i) * 0x4)
+#define FIREWALL_SYSMEM_RGN_CNT		8
+#define FIREWALL_SYSMEM_MST(i)		(0x40 + (i) * 0x4)
+#define FIREWALL_SYSMEM_MST_CNT		43
+#define FIREWALL_SYSMEM_CON		0xf0
+
+#define FIREWALL_DSU_RGN(i)		((i) * 0x4)
+#define FIREWALL_DSU_RGN_CNT		16
+#define FIREWALL_DSU_MST(i)		(0x40 + (i) * 0x4)
+#define FIREWALL_DSU_MST_CNT		2
+#define FIREWALL_DSU_CON(i)		(0xf0 + (i) * 4)
+#define FIREWALL_DSU_CON_CNT		4
+
+#define PLAT_MAX_DDR_CAPACITY_MB	0x8000	/* for 32GB */
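+/*
+ * Region registers hold (top - 1) in the upper half-word and base in the
+ * lower one. DDR/DSU regions are programmed in MB, sysmem regions in 4KB
+ * units (hence the /4 on the KB arguments).
+ */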
+#define RG_MAP_SECURE(top, base)	\
+	(((((top) - 1) & 0x7fff) << 16) | ((base) & 0x7fff))
+#define RG_MAP_SRAM_SECURE(top_kb, base_kb)	\
+	(((((top_kb) / 4 - 1) & 0xff) << 16) | ((base_kb) / 4 & 0xff))
+
+void secure_timer_init(void);
+void sgrf_init(void);
+
+#endif /* SECURE_H */
diff --git a/plat/rockchip/rk3588/drivers/soc/soc.c b/plat/rockchip/rk3588/drivers/soc/soc.c
new file mode 100644
index 0000000..6db81ee
--- /dev/null
+++ b/plat/rockchip/rk3588/drivers/soc/soc.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <common/debug.h>
+#include <drivers/console.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables_compat.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <pmu.h>
+
+#include <plat_private.h>
+#include <rk3588_clk.h>
+#include <secure.h>
+#include <soc.h>
+
+#define RK3588_DEV_RNG0_BASE	0xf0000000
+#define RK3588_DEV_RNG0_SIZE	0x0ffff000
+
+const mmap_region_t plat_rk_mmap[] = {
+	MAP_REGION_FLAT(RK3588_DEV_RNG0_BASE, RK3588_DEV_RNG0_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(DDR_SHARE_MEM, DDR_SHARE_SIZE,
+			MT_DEVICE | MT_RW | MT_NS),
+	{ 0 }
+};
+
+/* The RockChip power domain tree descriptor */
+const unsigned char rockchip_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	PLATFORM_SYSTEM_COUNT,
+	/* No of children for the root node */
+	PLATFORM_CLUSTER_COUNT,
+	/* No of children for the first cluster node */
+	PLATFORM_CLUSTER0_CORE_COUNT,
+	/* No of children for the second cluster node */
+	PLATFORM_CLUSTER1_CORE_COUNT
+};
+
+void timer_hp_init(void)
+{
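+	/* Nothing to do if the timer is already running */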
+	if ((mmio_read_32(TIMER_HP_BASE + TIMER_HP_CTRL) & 0x1) != 0)
+		return;
+
+	mmio_write_32(TIMER_HP_BASE + TIMER_HP_CTRL, 0x0);
+	dsb();
+	mmio_write_32(TIMER_HP_BASE + TIMER_HP_LOAD_COUNT0, 0xffffffff);
+	mmio_write_32(TIMER_HP_BASE + TIMER_HP_LOAD_COUNT1, 0xffffffff);
+	mmio_write_32(TIMER_HP_BASE + TIMER_HP_INT_EN, 0);
+	dsb();
+	mmio_write_32(TIMER_HP_BASE + TIMER_HP_CTRL, 0x1);
+}
+
+static void system_reset_init(void)
+{
+	/* enable wdt_ns0~4 to trigger global reset and select first reset.
+	 * enable tsadc to trigger global reset and select first reset.
+	 * enable global reset and wdt to trigger pmu reset.
+	 * select first reset to trigger pmu reset.
+	 */
+	mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, 0xffdf);
+
+	/* enable wdt_s, wdt_ns reset */
+	mmio_write_32(BUSSGRF_BASE + SGRF_SOC_CON(2), 0x0c000c00);
+
+	/* reset width = 0xffff */
+	mmio_write_32(PMU1GRF_BASE + PMU1GRF_SOC_CON(1), 0xffffffff);
+
+	/* enable first/tsadc/wdt reset output */
+	mmio_write_32(PMU1SGRF_BASE + PMU1SGRF_SOC_CON(0), 0x00070007);
+
+	/* pmu1_grf pmu1_ioc hold */
+	mmio_write_32(PMU1GRF_BASE + PMU1GRF_SOC_CON(7), 0x30003000);
+
+	/* pmu1sgrf hold */
+	mmio_write_32(PMU1SGRF_BASE + PMU1SGRF_SOC_CON(14), 0x00200020);
+
+	/* select tsadc_shut_m0 iomux */
+	mmio_write_32(PMU0IOC_BASE + 0x0, 0x00f00020);
+}
+
+void plat_rockchip_soc_init(void)
+{
+	rockchip_clock_init();
+	secure_timer_init();
+	timer_hp_init();
+	system_reset_init();
+	sgrf_init();
+	rockchip_init_scmi_server();
+}
diff --git a/plat/rockchip/rk3588/drivers/soc/soc.h b/plat/rockchip/rk3588/drivers/soc/soc.h
new file mode 100644
index 0000000..9af179a
--- /dev/null
+++ b/plat/rockchip/rk3588/drivers/soc/soc.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SOC_H__
+#define __SOC_H__
+
+enum pll_id {
+	APLL_ID,
+	DPLL_ID,
+	GPLL_ID,
+	CPLL_ID,
+	NPLL_ID,
+	VPLL_ID,
+};
+
+enum pmu_pll_id {
+	PPLL_ID = 0,
+	HPLL_ID
+};
+
+enum cru_mode_con00 {
+	CLK_APLL,
+	CLK_DPLL,
+	CLK_CPLL,
+	CLK_GPLL,
+	CLK_REVSERVED,
+	CLK_NPLL,
+	CLK_VPLL,
+	CLK_USBPLL,
+};
+
+#define KHz				1000
+#define MHz				(1000 * KHz)
+#define OSC_HZ				(24 * MHz)
+
+/* CRU */
+#define GLB_SRST_FST_CFG_VAL		0xfdb9
+
+#define CRU_PLLS_CON(pll_id, i)		(0x160 + (pll_id) * 0x20 + (i) * 0x4)
+#define CRU_PLL_CON(i)			((i) * 0x4)
+#define CRU_MODE_CON0			0x280
+#define CRU_CLKSEL_CON(i)		((i) * 0x4 + 0x300)
+#define CRU_CLKGATE_CON(i)		((i) * 0x4 + 0x800)
+#define CRU_CLKGATE_CON_CNT		78
+#define CRU_SOFTRST_CON(i)		((i) * 0x4 + 0xa00)
+#define CRU_GLB_CNT_TH			0xc00
+#define CRU_GLB_SRST_FST		0xc08
+#define CRU_GLB_SRST_SND		0xc0c
+#define CRU_GLB_RST_CON			0xc10
+#define CRU_GLB_RST_ST			0xc04
+#define CRU_SDIO_CON0			0xc24
+#define CRU_SDIO_CON1			0xc28
+#define CRU_SDMMC_CON0			0xc30
+#define CRU_SDMMC_CON1			0xc34
+#define CRU_AUTOCS_CON0(id)		(0xd00 + (id) * 8)
+#define CRU_AUTOCS_CON1(id)		(0xd04 + (id) * 8)
+
+#define CRU_AUTOCS_ID_CNT		74
+
+#define CRU_PLLCON0_M_MASK		0x3ff
+#define CRU_PLLCON0_M_SHIFT		0
+#define CRU_PLLCON1_P_MASK		0x3f
+#define CRU_PLLCON1_P_SHIFT		0
+#define CRU_PLLCON1_S_MASK		0x7
+#define CRU_PLLCON1_S_SHIFT		6
+#define CRU_PLLCON2_K_MASK		0xffff
+#define CRU_PLLCON2_K_SHIFT		0
+#define CRU_PLLCON1_PWRDOWN		BIT(13)
+#define CRU_PLLCON6_LOCK_STATUS		BIT(15)
+
+#define CRU_BIGCPU02_RST_MSK		0x30
+#define CRU_BIGCPU13_RST_MSK		0x300
+
+#define PHPCRU_CLKGATE_CON		0x800
+#define PHPCRU_CLKGATE_CON_CNT		1
+
+#define SECURECRU_CLKGATE_CON(i)	((i) * 0x4 + 0x800)
+#define SECURECRU_CLKGATE_CON_CNT	4
+
+#define PMU1CRU_CLKGATE_CON_CNT		6
+
+/* CENTER GRF */
+#define CENTER_GRF_CON(i)		((i) * 4)
+
+/* PMU1GRF */
+#define PMU1GRF_SOC_CON(n)		((n) * 4)
+#define PMU1GRF_SOC_ST			0x60
+#define PMU1GRF_OS_REG(n)		(0x200 + ((n) * 4))
+
+#define PMU_MCU_HALT			BIT(7)
+#define PMU_MCU_SLEEP			BIT(9)
+#define PMU_MCU_DEEPSLEEP		BIT(10)
+#define PMU_MCU_STOP_MSK		\
+	(PMU_MCU_HALT | PMU_MCU_SLEEP | PMU_MCU_DEEPSLEEP)
+
+/* SYSGRF */
+#define SYS_GRF_NOC_CON(n)		(0x100 + (n) * 4)
+#define SYS_GRF_SOC_CON(n)		(0x300 + (n) * 4)
+#define SYS_GRF_SOC_STATUS(n)		(0x380 + (n) * 4)
+
+#define SYS_GRF_LITTLE_CPUS_WFE		0xf
+#define SYS_GRF_CORE0_CPUS_WFE		0x30
+#define SYS_GRF_CORE1_CPUS_WFE		0xc0
+#define SYS_GRF_BIG_CPUS_WFE		0xf0
+#define SYS_GRF_LITTLE_CPUS_WFI		0xf00
+#define SYS_GRF_CORE0_CPUS_WFI		0x3000
+#define SYS_GRF_CORE1_CPUS_WFI		0xc000
+
+/* pvtm */
+#define PVTM_CON(i)			(0x4 + (i) * 4)
+#define PVTM_INTEN			0x70
+#define PVTM_INTSTS			0x74
+#define PVTM_STATUS(i)			(0x80 + (i) * 4)
+#define PVTM_CALC_CNT			0x200
+
+enum pvtm_con0 {
+	pvtm_start = 0,
+	pvtm_osc_en = 1,
+	pvtm_osc_sel = 2,
+	pvtm_rnd_seed_en = 5,
+};
+
+/* timer */
+#define TIMER_LOAD_COUNT0		0x00
+#define TIMER_LOAD_COUNT1		0x04
+#define TIMER_CURRENT_VALUE0		0x08
+#define TIMER_CURRENT_VALUE1		0x0c
+#define TIMER_CONTROL_REG		0x10
+#define TIMER_INTSTATUS			0x18
+
+#define TIMER_DIS			0x0
+#define TIMER_EN			0x1
+
+#define TIMER_FMODE			(0x0 << 1)
+#define TIMER_RMODE			(0x1 << 1)
+
+#define STIMER0_CHN_BASE(n)		(STIMER0_BASE + 0x20 * (n))
+#define STIMER1_CHN_BASE(n)		(STIMER1_BASE + 0x20 * (n))
+
+/* cpu timer */
+#define TIMER_HP_REVISION		0x0
+#define TIMER_HP_CTRL			0x4
+#define TIMER_HP_INT_EN			0x8
+#define TIMER_HP_T24_GCD		0xc
+#define TIMER_HP_T32_GCD		0x10
+#define TIMER_HP_LOAD_COUNT0		0x14
+#define TIMER_HP_LOAD_COUNT1		0x18
+#define TIMER_HP_T24_DELAT_COUNT0	0x1c
+#define TIMER_HP_T24_DELAT_COUNT1	0x20
+#define TIMER_HP_CURR_32K_VALUE0	0x24
+#define TIMER_HP_CURR_32K_VALUE1	0x28
+#define TIMER_HP_CURR_TIMER_VALUE0	0x2c
+#define TIMER_HP_CURR_TIMER_VALUE1	0x30
+#define TIMER_HP_T24_32BEGIN0		0x34
+#define TIMER_HP_T24_32BEGIN1		0x38
+#define TIMER_HP_T32_24END0		0x3c
+#define TIMER_HP_T32_24END1		0x40
+#define TIMER_HP_BEGIN_END_VALID	0x44
+#define TIMER_HP_SYNC_REQ		0x48
+#define TIMER_HP_INTR_STATUS		0x4c
+
+ /* GPIO */
+#define GPIO_SWPORT_DR_L		0x0000
+#define GPIO_SWPORT_DR_H		0x0004
+#define GPIO_SWPORT_DDR_L		0x0008
+#define GPIO_SWPORT_DDR_H		0x000c
+#define GPIO_INT_EN_L			0x0010
+#define GPIO_INT_EN_H			0x0014
+#define GPIO_INT_MASK_L			0x0018
+#define GPIO_INT_MASK_H			0x001c
+#define GPIO_INT_TYPE_L			0x0020
+#define GPIO_INT_TYPE_H			0x0024
+#define GPIO_INT_POLARITY_L		0x0028
+#define GPIO_INT_POLARITY_H		0x002c
+#define GPIO_INT_BOTHEDGE_L		0x0030
+#define GPIO_INT_BOTHEDGE_H		0x0034
+#define GPIO_DEBOUNCE_L			0x0038
+#define GPIO_DEBOUNCE_H			0x003c
+#define GPIO_DBCLK_DIV_EN_L		0x0040
+#define GPIO_DBCLK_DIV_EN_H		0x0044
+#define GPIO_DBCLK_DIV_CON		0x0048
+#define GPIO_INT_STATUS			0x0050
+#define GPIO_INT_RAWSTATUS		0x0058
+#define GPIO_PORT_EOI_L			0x0060
+#define GPIO_PORT_EOI_H			0x0064
+#define GPIO_EXT_PORT			0x0070
+#define GPIO_VER_ID			0x0078
+
+/* DDRGRF */
+#define DDRGRF_CHA_CON(i)		((i) * 4)
+#define DDRGRF_CHB_CON(i)		(0x30 + (i) * 4)
+
+#define DDR_CHN_CNT			4
+
+#endif /* __SOC_H__ */
diff --git a/plat/rockchip/rk3588/include/plat.ld.S b/plat/rockchip/rk3588/include/plat.ld.S
new file mode 100644
index 0000000..e3ea9cc
--- /dev/null
+++ b/plat/rockchip/rk3588/include/plat.ld.S
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ROCKCHIP_PLAT_LD_S
+#define ROCKCHIP_PLAT_LD_S
+
+MEMORY {
+        PMUSRAM (rwx): ORIGIN = PMUSRAM_BASE, LENGTH = PMUSRAM_RSIZE
+}
+
+SECTIONS
+{
+	. = PMUSRAM_BASE;
+
+	/*
+	 * pmu_cpuson_entrypoint must be placed at a 64K-aligned
+	 * address for resume, so put it at the start of
+	 * pmusram.
+	 */
+	.text_pmusram : {
+		ASSERT(. == ALIGN(64 * 1024),
+			".pmusram.entry must be 64K aligned.");
+		 KEEP(*(.pmusram.entry))
+		__bl31_pmusram_text_start = .;
+		*(.pmusram.text)
+		*(.pmusram.rodata)
+		. = ALIGN(PAGE_SIZE);
+		__bl31_pmusram_text_end = .;
+		__bl31_pmusram_data_start = .;
+		*(.pmusram.data)
+		. = ALIGN(PAGE_SIZE);
+		__bl31_pmusram_data_end = .;
+
+		ASSERT(__bl31_pmusram_data_end <= PMUSRAM_BASE + PMUSRAM_RSIZE,
+			".pmusram has exceeded its limit.");
+	} >PMUSRAM
+}
+
+#endif /* ROCKCHIP_PLAT_LD_S */
diff --git a/plat/rockchip/rk3588/include/plat_sip_calls.h b/plat/rockchip/rk3588/include/plat_sip_calls.h
new file mode 100644
index 0000000..bc4455f
--- /dev/null
+++ b/plat/rockchip/rk3588/include/plat_sip_calls.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_SIP_CALLS_H__
+#define __PLAT_SIP_CALLS_H__
+
+#define RK_PLAT_SIP_NUM_CALLS	0
+
+#endif /* __PLAT_SIP_CALLS_H__ */
diff --git a/plat/rockchip/rk3588/include/platform_def.h b/plat/rockchip/rk3588/include/platform_def.h
new file mode 100644
index 0000000..5946af0
--- /dev/null
+++ b/plat/rockchip/rk3588/include/platform_def.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arch.h>
+#include <plat/common/common_def.h>
+
+#include <rk3588_def.h>
+
+#define DEBUG_XLAT_TABLE 0
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#define PLATFORM_LINKER_FORMAT		"elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH		aarch64
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#if DEBUG_XLAT_TABLE
+#define PLATFORM_STACK_SIZE 0x800
+#elif IMAGE_BL1
+#define PLATFORM_STACK_SIZE 0x440
+#elif IMAGE_BL2
+#define PLATFORM_STACK_SIZE 0x400
+#elif IMAGE_BL31
+#define PLATFORM_STACK_SIZE 0x800
+#elif IMAGE_BL32
+#define PLATFORM_STACK_SIZE 0x440
+#endif
+
+#define FIRMWARE_WELCOME_STR		"Booting Trusted Firmware\n"
+
+#define PLATFORM_SYSTEM_COUNT		1
+#define PLATFORM_CLUSTER_COUNT		1
+#define PLATFORM_CLUSTER0_CORE_COUNT	8
+#define PLATFORM_CLUSTER1_CORE_COUNT	0
+#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER1_CORE_COUNT +	\
+					 PLATFORM_CLUSTER0_CORE_COUNT)
+
+#define PLATFORM_NUM_AFFS		(PLATFORM_SYSTEM_COUNT +	\
+					 PLATFORM_CLUSTER_COUNT +	\
+					 PLATFORM_CORE_COUNT)
+
+#define PLAT_MAX_PWR_LVL		MPIDR_AFFLVL2
+
+#define PLAT_RK_CLST_TO_CPUID_SHIFT	8
+
+/*
+ * This macro defines the deepest retention state possible. A higher state
+ * id will represent an invalid or a power down state.
+ */
+#define PLAT_MAX_RET_STATE		1
+
+/*
+ * This macro defines the deepest power down states possible. Any state ID
+ * higher than this is invalid.
+ */
+#define PLAT_MAX_OFF_STATE		2
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+/* TF text, ro, rw, Size: 1MB */
+#define TZRAM_BASE		(0x0)
+#define TZRAM_SIZE		(0x100000)
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL3-1 at the top of the Trusted RAM
+ */
+#define BL31_BASE		(TZRAM_BASE + 0x40000)
+#define BL31_LIMIT		(TZRAM_BASE + TZRAM_SIZE)
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 32)
+
+#define ADDR_SPACE_SIZE			(1ULL << 32)
+#define MAX_XLAT_TABLES			18
+#define MAX_MMAP_REGIONS		27
+
+/*******************************************************************************
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same cache
+ * line at any cache level. They could belong to different cpus/clusters &
+ * get written while being protected by different locks causing corruption of
+ * a valid mailbox address.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT	6
+#define CACHE_WRITEBACK_GRANULE	(1 << CACHE_WRITEBACK_SHIFT)
+
+/*
+ * Define GICD and GICC and GICR base
+ */
+#define PLAT_RK_GICD_BASE	PLAT_GICD_BASE
+#define PLAT_RK_GICC_BASE	PLAT_GICC_BASE
+#define PLAT_RK_GICR_BASE	PLAT_GICR_BASE
+
+#define PLAT_RK_UART_BASE	RK_DBG_UART_BASE
+#define PLAT_RK_UART_CLOCK	RK_DBG_UART_CLOCK
+#define PLAT_RK_UART_BAUDRATE	RK_DBG_UART_BAUDRATE
+
+#define PLAT_RK_PRIMARY_CPU	0x0
+
+#endif /* __PLATFORM_DEF_H__ */
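The mailbox comment above asks for each mailbox to be padded and aligned to the cache writeback granule so that no two mailboxes ever share a cache line. A minimal sketch of that pattern, assuming TF-A's __aligned() helper from cdefs.h; the structure and variable names below are illustrative, not taken from the Rockchip port:

#include <stdint.h>

#include <cdefs.h>
#include <platform_def.h>

/* One slot per CPU, padded so that no two slots share a cache line. */
struct cpu_mailbox {
	uintptr_t entry_point;
	uint8_t pad[CACHE_WRITEBACK_GRANULE - sizeof(uintptr_t)];
} __aligned(CACHE_WRITEBACK_GRANULE);

static struct cpu_mailbox mailboxes[PLATFORM_CORE_COUNT];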
diff --git a/plat/rockchip/rk3588/plat_sip_calls.c b/plat/rockchip/rk3588/plat_sip_calls.c
new file mode 100644
index 0000000..496e8d7
--- /dev/null
+++ b/plat/rockchip/rk3588/plat_sip_calls.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <drivers/scmi-msg.h>
+
+#include <plat_sip_calls.h>
+#include <rockchip_sip_svc.h>
+
+uintptr_t rockchip_plat_sip_handler(uint32_t smc_fid,
+				    u_register_t x1,
+				    u_register_t x2,
+				    u_register_t x3,
+				    u_register_t x4,
+				    void *cookie,
+				    void *handle,
+				    u_register_t flags)
+{
+	switch (smc_fid) {
+	case RK_SIP_SCMI_AGENT0:
+		scmi_smt_fastcall_smc_entry(0);
+		SMC_RET1(handle, 0);
+
+	default:
+		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+}
diff --git a/plat/rockchip/rk3588/platform.mk b/plat/rockchip/rk3588/platform.mk
new file mode 100644
index 0000000..07eda40
--- /dev/null
+++ b/plat/rockchip/rk3588/platform.mk
@@ -0,0 +1,98 @@
+#
+# Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+RK_PLAT			:=	plat/rockchip
+RK_PLAT_SOC		:=	${RK_PLAT}/${PLAT}
+RK_PLAT_COMMON		:=	${RK_PLAT}/common
+
+DISABLE_BIN_GENERATION	:=	1
+include lib/libfdt/libfdt.mk
+include lib/xlat_tables_v2/xlat_tables.mk
+
+# GIC-600 configuration
+GICV3_IMPL		:=	GIC600
+GICV3_SUPPORT_GIC600	:=	1
+
+# Include GICv3 driver files
+include drivers/arm/gic/v3/gicv3.mk
+
+PLAT_INCLUDES		:=	-Iinclude/plat/common				\
+				-Idrivers/arm/gic/v3/				\
+				-Idrivers/scmi-msg/				\
+				-I${RK_PLAT_COMMON}/				\
+				-I${RK_PLAT_COMMON}/drivers/pmu/		\
+				-I${RK_PLAT_COMMON}/drivers/parameter/		\
+				-I${RK_PLAT_COMMON}/include/			\
+				-I${RK_PLAT_COMMON}/pmusram/			\
+				-I${RK_PLAT_COMMON}/scmi/			\
+				-I${RK_PLAT_SOC}/				\
+				-I${RK_PLAT_SOC}/drivers/pmu/			\
+				-I${RK_PLAT_SOC}/drivers/scmi/			\
+				-I${RK_PLAT_SOC}/drivers/secure/		\
+				-I${RK_PLAT_SOC}/drivers/soc/			\
+				-I${RK_PLAT_SOC}/include/
+
+RK_GIC_SOURCES		:=	${GICV3_SOURCES}				\
+				plat/common/plat_gicv3.c			\
+				${RK_PLAT}/common/rockchip_gicv3.c
+
+PLAT_BL_COMMON_SOURCES	:=	${XLAT_TABLES_LIB_SRCS}				\
+				common/desc_image_load.c			\
+				plat/common/aarch64/crash_console_helpers.S	\
+				lib/bl_aux_params/bl_aux_params.c		\
+				plat/common/plat_psci_common.c
+
+ifneq (${ENABLE_STACK_PROTECTOR},0)
+PLAT_BL_COMMON_SOURCES	+=	${RK_PLAT_COMMON}/rockchip_stack_protector.c
+endif
+
+BL31_SOURCES		+=	${RK_GIC_SOURCES}				\
+				drivers/ti/uart/aarch64/16550_console.S		\
+				drivers/delay_timer/delay_timer.c		\
+				drivers/delay_timer/generic_delay_timer.c	\
+				drivers/scmi-msg/base.c				\
+				drivers/scmi-msg/clock.c			\
+				drivers/scmi-msg/entry.c			\
+				drivers/scmi-msg/reset_domain.c			\
+				drivers/scmi-msg/smt.c				\
+				lib/cpus/aarch64/cortex_a55.S			\
+				lib/cpus/aarch64/cortex_a76.S			\
+				${RK_PLAT_COMMON}/aarch64/plat_helpers.S	\
+				${RK_PLAT_COMMON}/aarch64/platform_common.c	\
+				${RK_PLAT_COMMON}/bl31_plat_setup.c		\
+				${RK_PLAT_COMMON}/plat_pm.c			\
+				${RK_PLAT_COMMON}/plat_pm_helpers.c		\
+				${RK_PLAT_COMMON}/plat_topology.c		\
+				${RK_PLAT_COMMON}/params_setup.c                \
+				${RK_PLAT_COMMON}/pmusram/cpus_on_fixed_addr.S	\
+				${RK_PLAT_COMMON}/rockchip_sip_svc.c		\
+				${RK_PLAT_COMMON}/scmi/scmi.c			\
+				${RK_PLAT_COMMON}/scmi/scmi_clock.c		\
+				${RK_PLAT_COMMON}/scmi/scmi_rstd.c		\
+				${RK_PLAT_SOC}/plat_sip_calls.c         	\
+				${RK_PLAT_SOC}/drivers/secure/secure.c		\
+				${RK_PLAT_SOC}/drivers/soc/soc.c		\
+				${RK_PLAT_SOC}/drivers/pmu/pmu.c		\
+				${RK_PLAT_SOC}/drivers/pmu/pm_pd_regs.c		\
+				${RK_PLAT_SOC}/drivers/scmi/rk3588_clk.c	\
+				${RK_PLAT_SOC}/drivers/scmi/rk3588_rstd.c
+
+CTX_INCLUDE_AARCH32_REGS	:=	0
+ENABLE_PLAT_COMPAT	:=	0
+MULTI_CONSOLE_API	:=	1
+ERRATA_A55_1530923	:=	1
+
+# System coherency is managed in hardware
+HW_ASSISTED_COHERENCY	:=	1
+
+# When building for systems with hardware-assisted coherency, there is no need
+# for USE_COHERENT_MEM, so require that it is set to 0.
+USE_COHERENT_MEM	:=	0
+
+ENABLE_SPE_FOR_LOWER_ELS	:= 0
+
+$(eval $(call add_define,PLAT_EXTRA_LD_SCRIPT))
+$(eval $(call add_define,PLAT_SKIP_DFS_TLB_DCACHE_MAINTENANCE))
diff --git a/plat/rockchip/rk3588/rk3588_def.h b/plat/rockchip/rk3588/rk3588_def.h
new file mode 100644
index 0000000..412495a
--- /dev/null
+++ b/plat/rockchip/rk3588/rk3588_def.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2024, Rockchip, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_DEF_H__
+#define __PLAT_DEF_H__
+
+#define SIZE_K(n)		((n) * 1024)
+
+#define WITH_16BITS_WMSK(bits)	(0xffff0000 | (bits))
+
+/* Special value used to verify platform parameters from BL2 to BL3-1 */
+#define RK_BL31_PLAT_PARAM_VAL	0x0f1e2d3c4b5a6978ULL
+
+#define UMCTL0_BASE		0xf7000000
+#define UMCTL1_BASE		0xf8000000
+#define UMCTL2_BASE		0xf9000000
+#define UMCTL3_BASE		0xfa000000
+
+#define GIC600_BASE		0xfe600000
+#define GIC600_SIZE		SIZE_K(64)
+
+#define DAPLITE_BASE		0xfd100000
+#define PMU0SGRF_BASE		0xfd580000
+#define PMU1SGRF_BASE		0xfd582000
+#define BUSSGRF_BASE		0xfd586000
+#define DSUSGRF_BASE		0xfd587000
+#define PMU0GRF_BASE		0xfd588000
+#define PMU1GRF_BASE		0xfd58a000
+
+#define SYSGRF_BASE		0xfd58c000
+#define BIGCORE0GRF_BASE	0xfd590000
+#define BIGCORE1GRF_BASE	0xfd592000
+#define LITCOREGRF_BASE		0xfd594000
+#define DSUGRF_BASE		0xfd598000
+#define DDR01GRF_BASE		0xfd59c000
+#define DDR23GRF_BASE		0xfd59d000
+#define CENTERGRF_BASE		0xfd59e000
+#define GPUGRF_BASE		0xfd5a0000
+#define NPUGRF_BASE		0xfd5a2000
+#define USBGRF_BASE		0xfd5ac000
+#define PHPGRF_BASE		0xfd5b0000
+#define PCIE3PHYGRF_BASE	0xfd5b8000
+#define USB2PHY0_GRF_BASE	0xfd5d0000
+#define USB2PHY1_GRF_BASE	0xfd5d4000
+#define USB2PHY2_GRF_BASE	0xfd5d8000
+#define USB2PHY3_GRF_BASE	0xfd5dc000
+
+#define PMU0IOC_BASE		0xfd5f0000
+#define PMU1IOC_BASE		0xfd5f4000
+#define BUSIOC_BASE		0xfd5f8000
+#define VCCIO1_4_IOC_BASE	0xfd5f9000
+#define VCCIO3_5_IOC_BASE	0xfd5fa000
+#define VCCIO2_IOC_BASE		0xfd5fb000
+#define VCCIO6_IOC_BASE		0xfd5fc000
+
+#define SRAM_BASE		0xff000000
+#define PMUSRAM_BASE		0xff100000
+#define PMUSRAM_SIZE		SIZE_K(128)
+#define PMUSRAM_RSIZE		SIZE_K(64)
+
+#define CRU_BASE		0xfd7c0000
+#define PHP_CRU_BASE		0xfd7c8000
+#define SCRU_BASE		0xfd7d0000
+#define BUSSCRU_BASE		0xfd7d8000
+#define PMU1SCRU_BASE		0xfd7e0000
+#define PMU1CRU_BASE		0xfd7f0000
+
+#define DDR0CRU_BASE		0xfd800000
+#define DDR1CRU_BASE		0xfd804000
+#define DDR2CRU_BASE		0xfd808000
+#define DDR3CRU_BASE		0xfd80c000
+
+#define BIGCORE0CRU_BASE	0xfd810000
+#define BIGCORE1CRU_BASE	0xfd812000
+#define LITCRU_BASE		0xfd814000
+#define DSUCRU_BASE		0xfd818000
+
+#define I2C0_BASE		0xfd880000
+#define UART0_BASE		0xfd890000
+#define GPIO0_BASE		0xfd8a0000
+#define PWM0_BASE		0xfd8b0000
+#define PMUPVTM_BASE		0xfd8c0000
+#define TIMER_HP_BASE		0xfd8c8000
+#define PMU0_BASE		0xfd8d0000
+#define PMU1_BASE		0xfd8d4000
+#define PMU2_BASE		0xfd8d8000
+#define PMU_BASE		PMU0_BASE
+#define PMUWDT_BASE		0xfd8e0000
+#define PMUTIMER_BASE		0xfd8f0000
+#define OSC_CHK_BASE		0xfd9b0000
+#define VOP_BASE		0xfdd90000
+#define HDMIRX_BASE		0xfdee0000
+
+#define MSCH0_BASE		0xfe000000
+#define MSCH1_BASE		0xfe002000
+#define MSCH2_BASE		0xfe004000
+#define MSCH3_BASE		0xfe006000
+#define FIREWALL_DSU_BASE	0xfe010000
+#define FIREWALL_DDR_BASE	0xfe030000
+#define FIREWALL_SYSMEM_BASE	0xfe038000
+#define DDRPHY0_BASE		0xfe0c0000
+#define DDRPHY1_BASE		0xfe0d0000
+#define DDRPHY2_BASE		0xfe0e0000
+#define DDRPHY3_BASE		0xfe0f0000
+#define TIMER_DDR_BASE		0xfe118000
+#define KEYLADDER_BASE		0xfe380000
+#define CRYPTO_S_BASE		0xfe390000
+#define OTP_S_BASE		0xfe3a0000
+#define DCF_BASE		0xfe3c0000
+#define STIMER0_BASE		0xfe3d0000
+#define WDT_S_BASE		0xfe3e0000
+#define CRYPTO_S_BY_KEYLAD_BASE	0xfe420000
+#define NSTIMER0_BASE		0xfeae0000
+#define NSTIMER1_BASE		0xfeae8000
+#define WDT_NS_BASE		0xfeaf0000
+
+#define UART1_BASE		0xfeb40000
+#define UART2_BASE		0xfeb50000
+#define UART3_BASE		0xfeb60000
+#define UART4_BASE		0xfeb70000
+#define UART5_BASE		0xfeb80000
+#define UART6_BASE		0xfeb90000
+#define UART7_BASE		0xfeba0000
+#define UART8_BASE		0xfebb0000
+#define UART9_BASE		0xfebc0000
+
+#define GPIO1_BASE		0xfec20000
+#define GPIO2_BASE		0xfec30000
+#define GPIO3_BASE		0xfec40000
+#define GPIO4_BASE		0xfec50000
+
+#define MAILBOX1_BASE		0xfec70000
+#define OTP_NS_BASE		0xfecc0000
+#define INTMUX0_DDR_BASE	0xfecf8000
+#define INTMUX1_DDR_BASE	0xfecfc000
+#define STIMER1_BASE		0xfed30000
+
+/**************************************************************************
+ * sys sram allocation
+ **************************************************************************/
+#define SRAM_ENTRY_BASE		SRAM_BASE
+#define SRAM_PMUM0_SHMEM_BASE	(SRAM_ENTRY_BASE + SIZE_K(3))
+#define SRAM_LD_BASE		(SRAM_ENTRY_BASE + SIZE_K(4))
+#define SRAM_LD_SIZE		SIZE_K(64)
+
+#define SRAM_LD_SP		(SRAM_LD_BASE + SRAM_LD_SIZE -\
+				 128)
+
+/**************************************************************************
+ * share mem region allocation: 1M~2M
+ **************************************************************************/
+#define DDR_SHARE_MEM		SIZE_K(1024)
+#define DDR_SHARE_SIZE		SIZE_K(64)
+
+#define SHARE_MEM_BASE		DDR_SHARE_MEM
+#define SHARE_MEM_PAGE_NUM	15
+#define SHARE_MEM_SIZE		SIZE_K(SHARE_MEM_PAGE_NUM * 4)
+
+#define	SCMI_SHARE_MEM_BASE	(SHARE_MEM_BASE + SHARE_MEM_SIZE)
+#define	SCMI_SHARE_MEM_SIZE	SIZE_K(4)
+
+#define SMT_BUFFER_BASE		SCMI_SHARE_MEM_BASE
+#define SMT_BUFFER0_BASE	SMT_BUFFER_BASE
+
+/**************************************************************************
+ * UART related constants
+ **************************************************************************/
+#define RK_DBG_UART_BASE		UART2_BASE
+#define RK_DBG_UART_BAUDRATE		1500000
+#define RK_DBG_UART_CLOCK		24000000
+
+/******************************************************************************
+ * System counter frequency related constants
+ ******************************************************************************/
+#define SYS_COUNTER_FREQ_IN_TICKS	24000000
+#define SYS_COUNTER_FREQ_IN_MHZ		24
+
+/******************************************************************************
+ * GIC-600 & interrupt handling related constants
+ ******************************************************************************/
+
+/* Base rk_platform compatible GIC memory map */
+#define PLAT_GICD_BASE			GIC600_BASE
+#define PLAT_GICC_BASE			0
+#define PLAT_GICR_BASE			(GIC600_BASE + 0x80000)
+#define PLAT_GICITS0_BASE		0xfe640000
+#define PLAT_GICITS1_BASE		0xfe660000
+
+/******************************************************************************
+ * sgi, ppi
+ ******************************************************************************/
+#define RK_IRQ_SEC_SGI_0		8
+#define RK_IRQ_SEC_SGI_1		9
+#define RK_IRQ_SEC_SGI_2		10
+#define RK_IRQ_SEC_SGI_3		11
+#define RK_IRQ_SEC_SGI_4		12
+#define RK_IRQ_SEC_SGI_5		13
+#define RK_IRQ_SEC_SGI_6		14
+#define RK_IRQ_SEC_SGI_7		15
+#define RK_IRQ_SEC_PHY_TIMER		29
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+
+#define PLAT_RK_GICV3_G1S_IRQS						\
+	INTR_PROP_DESC(RK_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY,	\
+		       INTR_GROUP1S, GIC_INTR_CFG_LEVEL)
+
+#define PLAT_RK_GICV3_G0_IRQS						\
+	INTR_PROP_DESC(RK_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY,	\
+		       INTR_GROUP0, GIC_INTR_CFG_LEVEL)
+
+/******************************************************************************
+ * pm reg region memory
+ ******************************************************************************/
+#define ROCKCHIP_PM_REG_REGION_MEM_SIZE		SIZE_K(4)
+
+#endif /* __PLAT_DEF_H__ */
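WITH_16BITS_WMSK() above reflects the Rockchip convention where the upper 16 bits of many GRF/CRU registers act as a write-enable mask for the lower 16 bits: OR-ing in 0xffff0000 lets the whole low half-word be written in one access. A hedged sketch of the usual call pattern with TF-A's mmio helper; the register offset and value are placeholders, not real RK3588 fields, and the header name assumes rk3588_def.h is on the include path:

#include <lib/mmio.h>

#include <rk3588_def.h>

static void grf_write_example(void)
{
	/*
	 * Write 0x0001 to the low half-word of a hypothetical SYSGRF register
	 * at offset 0x0100; the 0xffff0000 write-enable mask generated by
	 * WITH_16BITS_WMSK() makes every bit of the low half-word take effect.
	 */
	mmio_write_32(SYSGRF_BASE + 0x0100U, WITH_16BITS_WMSK(0x0001U));
}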
diff --git a/plat/st/stm32mp2/bl2_plat_setup.c b/plat/st/stm32mp2/bl2_plat_setup.c
index 724209a..96ac68b 100644
--- a/plat/st/stm32mp2/bl2_plat_setup.c
+++ b/plat/st/stm32mp2/bl2_plat_setup.c
@@ -4,30 +4,268 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
+#include <assert.h>
 #include <cdefs.h>
 #include <stdint.h>
 
 #include <common/debug.h>
+#include <common/desc_image_load.h>
+#include <drivers/clk.h>
+#include <drivers/mmc.h>
+#include <drivers/st/regulator_fixed.h>
+#include <drivers/st/stm32mp2_ddr_helpers.h>
+#include <lib/fconf/fconf.h>
+#include <lib/fconf/fconf_dyn_cfg_getter.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
 #include <plat/common/platform.h>
 
 #include <platform_def.h>
 #include <stm32mp_common.h>
+#include <stm32mp_dt.h>
+
+#define BOOT_CTX_ADDR	0x0e000020UL
+
+static void print_reset_reason(void)
+{
+	uint32_t rstsr = mmio_read_32(stm32mp_rcc_base() + RCC_C1BOOTRSTSCLRR);
+
+	if (rstsr == 0U) {
+		WARN("Reset reason unknown\n");
+		return;
+	}
+
+	INFO("Reset reason (0x%x):\n", rstsr);
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_PADRSTF) == 0U) {
+		if ((rstsr & RCC_C1BOOTRSTSCLRR_STBYC1RSTF) != 0U) {
+			INFO("System exits from Standby for CA35\n");
+			return;
+		}
+
+		if ((rstsr & RCC_C1BOOTRSTSCLRR_D1STBYRSTF) != 0U) {
+			INFO("D1 domain exits from DStandby\n");
+			return;
+		}
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_PORRSTF) != 0U) {
+		INFO("  Power-on Reset (rst_por)\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_BORRSTF) != 0U) {
+		INFO("  Brownout Reset (rst_bor)\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSSETR_SYSC2RSTF) != 0U) {
+		INFO("  System reset (SYSRST) by M33\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSSETR_SYSC1RSTF) != 0U) {
+		INFO("  System reset (SYSRST) by A35\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_HCSSRSTF) != 0U) {
+		INFO("  Clock failure on HSE\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_IWDG1SYSRSTF) != 0U) {
+		INFO("  IWDG1 system reset (rst_iwdg1)\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_IWDG2SYSRSTF) != 0U) {
+		INFO("  IWDG2 system reset (rst_iwdg2)\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_IWDG3SYSRSTF) != 0U) {
+		INFO("  IWDG3 system reset (rst_iwdg3)\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_IWDG4SYSRSTF) != 0U) {
+		INFO("  IWDG4 system reset (rst_iwdg4)\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_IWDG5SYSRSTF) != 0U) {
+		INFO("  IWDG5 system reset (rst_iwdg5)\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_C1P1RSTF) != 0U) {
+		INFO("  A35 processor core 1 reset\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_PADRSTF) != 0U) {
+		INFO("  Pad Reset from NRST\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_VCORERSTF) != 0U) {
+		INFO("  Reset due to a failure of VDD_CORE\n");
+		return;
+	}
+
+	if ((rstsr & RCC_C1BOOTRSTSCLRR_C1RSTF) != 0U) {
+		INFO("  A35 processor reset\n");
+		return;
+	}
+
+	ERROR("  Unidentified reset reason\n");
+}
 
 void bl2_el3_early_platform_setup(u_register_t arg0 __unused,
 				  u_register_t arg1 __unused,
 				  u_register_t arg2 __unused,
 				  u_register_t arg3 __unused)
 {
+	stm32mp_save_boot_ctx_address(BOOT_CTX_ADDR);
 }
 
 void bl2_platform_setup(void)
 {
 }
 
+static void reset_backup_domain(void)
+{
+	uintptr_t pwr_base = stm32mp_pwr_base();
+	uintptr_t rcc_base = stm32mp_rcc_base();
+
+	/*
+	 * Disable the backup domain write protection.
+	 * The protection is enabled at each reset by hardware
+	 * and must be disabled by software.
+	 */
+	mmio_setbits_32(pwr_base + PWR_BDCR1, PWR_BDCR1_DBD3P);
+
+	while ((mmio_read_32(pwr_base + PWR_BDCR1) & PWR_BDCR1_DBD3P) == 0U) {
+		;
+	}
+
+	/* Reset backup domain on cold boot cases */
+	if ((mmio_read_32(rcc_base + RCC_BDCR) & RCC_BDCR_RTCCKEN) == 0U) {
+		mmio_setbits_32(rcc_base + RCC_BDCR, RCC_BDCR_VSWRST);
+
+		while ((mmio_read_32(rcc_base + RCC_BDCR) & RCC_BDCR_VSWRST) == 0U) {
+			;
+		}
+
+		mmio_clrbits_32(rcc_base + RCC_BDCR, RCC_BDCR_VSWRST);
+	}
+}
+
 void bl2_el3_plat_arch_setup(void)
 {
+	const char *board_model;
+	boot_api_context_t *boot_context =
+		(boot_api_context_t *)stm32mp_get_boot_ctx_address();
+
 	if (stm32_otp_probe() != 0U) {
 		EARLY_ERROR("OTP probe failed\n");
 		panic();
 	}
+
+	mmap_add_region(BL_CODE_BASE, BL_CODE_BASE,
+			BL_CODE_END - BL_CODE_BASE,
+			MT_CODE | MT_SECURE);
+
+	configure_mmu();
+
+	/* Prevent corruption of preloaded Device Tree */
+	mmap_add_dynamic_region(DTB_BASE, DTB_BASE,
+				DTB_LIMIT - DTB_BASE,
+				MT_RO_DATA | MT_SECURE);
+
+	if (dt_open_and_check(STM32MP_DTB_BASE) < 0) {
+		panic();
+	}
+
+	reset_backup_domain();
+
+	/*
+	 * Initialize DDR sub-system clock. This needs to be done before enabling DDR PLL (PLL2),
+	 * and so before stm32mp2_clk_init().
+	 */
+	ddr_sub_system_clk_init();
+
+	if (stm32mp2_clk_init() < 0) {
+		panic();
+	}
+
+	stm32_save_boot_info(boot_context);
+
+	if (stm32mp_uart_console_setup() != 0) {
+		goto skip_console_init;
+	}
+
+	stm32mp_print_cpuinfo();
+
+	board_model = dt_get_board_model();
+	if (board_model != NULL) {
+		NOTICE("Model: %s\n", board_model);
+	}
+
+	stm32mp_print_boardinfo();
+
+	print_reset_reason();
+
+skip_console_init:
+	if (fixed_regulator_register() != 0) {
+		panic();
+	}
+
+	fconf_populate("TB_FW", STM32MP_DTB_BASE);
+
+	stm32mp_io_setup();
+}
+
+/*******************************************************************************
+ * This function can be used by the platforms to update/use image
+ * information for given `image_id`.
+ ******************************************************************************/
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+	int err = 0;
+	bl_mem_params_node_t *bl_mem_params __maybe_unused = get_bl_mem_params_node(image_id);
+
+	assert(bl_mem_params != NULL);
+
+#if STM32MP_SDMMC || STM32MP_EMMC
+	/*
+	 * Invalidate remaining data read from MMC but not flushed by load_image_flush().
+	 * We take the worst case, which is 2 MMC blocks.
+	 */
+	if ((image_id != FW_CONFIG_ID) &&
+	    ((bl_mem_params->image_info.h.attr & IMAGE_ATTRIB_SKIP_LOADING) == 0U)) {
+		inv_dcache_range(bl_mem_params->image_info.image_base +
+				 bl_mem_params->image_info.image_size,
+				 2U * MMC_BLOCK_SIZE);
+	}
+#endif /* STM32MP_SDMMC || STM32MP_EMMC */
+
+	switch (image_id) {
+	case FW_CONFIG_ID:
+		/* Set global DTB info for fixed fw_config information */
+		set_config_info(STM32MP_FW_CONFIG_BASE, ~0UL, STM32MP_FW_CONFIG_MAX_SIZE,
+				FW_CONFIG_ID);
+		fconf_populate("FW_CONFIG", STM32MP_FW_CONFIG_BASE);
+
+		mmap_remove_dynamic_region(DTB_BASE, DTB_LIMIT - DTB_BASE);
+
+		break;
+
+	default:
+		/* Do nothing in default case */
+		break;
+	}
+
+	return err;
 }
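The reset_backup_domain() hunk above follows a set-then-poll idiom: set a control bit with mmio_setbits_32() and spin until the hardware reflects it before moving on. A standalone sketch of the same pattern, assuming only TF-A's generic mmio helpers; the helper name is illustrative:

#include <stdint.h>

#include <lib/mmio.h>

/* Set the given bit in the register at 'reg' and wait until it reads back set. */
static void mmio_setbits_and_wait(uintptr_t reg, uint32_t bit)
{
	mmio_setbits_32(reg, bit);

	while ((mmio_read_32(reg) & bit) == 0U) {
		;
	}
}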
diff --git a/plat/st/stm32mp2/include/boot_api.h b/plat/st/stm32mp2/include/boot_api.h
index d3bed76..580a65b 100644
--- a/plat/st/stm32mp2/include/boot_api.h
+++ b/plat/st/stm32mp2/include/boot_api.h
@@ -86,7 +86,7 @@
 /* Image Header related definitions */
 
 /* Definition of header version */
-#define BOOT_API_HEADER_VERSION					0x00020000U
+#define BOOT_API_HEADER_VERSION					0x00020200U
 
 /*
  * Magic number used to detect header in memory
diff --git a/plat/st/stm32mp2/include/plat_tbbr_img_def.h b/plat/st/stm32mp2/include/plat_tbbr_img_def.h
new file mode 100644
index 0000000..5dfd41f
--- /dev/null
+++ b/plat/st/stm32mp2/include/plat_tbbr_img_def.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2024, STMicroelectronics - All Rights Reserved
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLAT_TBBR_IMG_DEF_H
+#define PLAT_TBBR_IMG_DEF_H
+
+#include <export/common/tbbr/tbbr_img_def_exp.h>
+
+/* Undef the existing values */
+#undef BKUP_FWU_METADATA_IMAGE_ID
+#undef FWU_METADATA_IMAGE_ID
+#undef FW_CONFIG_ID
+#undef ENC_IMAGE_ID
+#undef GPT_IMAGE_ID
+#undef NT_FW_CONFIG_ID
+#undef SOC_FW_CONFIG_ID
+#undef TB_FW_CONFIG_ID
+#undef HW_CONFIG_ID
+#undef TRUSTED_BOOT_FW_CERT_ID
+#undef SOC_FW_CONTENT_CERT_ID
+#undef BL32_EXTRA1_IMAGE_ID
+#undef TOS_FW_CONFIG_ID
+
+/* Define the STM32MP2 used ID */
+#define FW_CONFIG_ID			U(1)
+#define HW_CONFIG_ID			U(2)
+#define ENC_IMAGE_ID			U(6)
+#define BL32_EXTRA1_IMAGE_ID		U(8)
+#define FWU_METADATA_IMAGE_ID		U(12)
+#define BKUP_FWU_METADATA_IMAGE_ID	U(13)
+#define TOS_FW_CONFIG_ID		U(16)
+#define NT_FW_CONFIG_ID			U(18)
+#define SOC_FW_CONFIG_ID		U(19)
+#define TB_FW_CONFIG_ID			U(20)
+#define TRUSTED_BOOT_FW_CERT_ID		U(21)
+#define SOC_FW_CONTENT_CERT_ID		U(23)
+#define STM32MP_CONFIG_CERT_ID		U(24)
+#define GPT_IMAGE_ID			U(25)
+
+/* Increase the MAX_NUMBER_IDS to match the authentication pool required */
+#define MAX_NUMBER_IDS			U(26)
+
+#endif /* PLAT_TBBR_IMG_DEF_H */
+
diff --git a/plat/st/stm32mp2/include/platform_def.h b/plat/st/stm32mp2/include/platform_def.h
index 404c384..2f7570d 100644
--- a/plat/st/stm32mp2/include/platform_def.h
+++ b/plat/st/stm32mp2/include/platform_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, STMicroelectronics - All Rights Reserved
+ * Copyright (c) 2023-2024, STMicroelectronics - All Rights Reserved
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -61,12 +61,33 @@
 #define BL2_LIMIT			(STM32MP_BL2_BASE + \
 					 STM32MP_BL2_SIZE)
 
+#define BL2_RO_BASE			STM32MP_BL2_RO_BASE
+#define BL2_RO_LIMIT			(STM32MP_BL2_RO_BASE + \
+					 STM32MP_BL2_RO_SIZE)
+
+#define BL2_RW_BASE			STM32MP_BL2_RW_BASE
+#define BL2_RW_LIMIT			(STM32MP_BL2_RW_BASE + \
+					 STM32MP_BL2_RW_SIZE)
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+#define BL31_BASE			0
+#define BL31_LIMIT			STM32MP_BL31_SIZE
+
 /*******************************************************************************
  * BL33 specific defines.
  ******************************************************************************/
 #define BL33_BASE			STM32MP_BL33_BASE
 
 /*******************************************************************************
+ * DTB specific defines.
+ ******************************************************************************/
+#define DTB_BASE			STM32MP_DTB_BASE
+#define DTB_LIMIT			(STM32MP_DTB_BASE + \
+					 STM32MP_DTB_SIZE)
+
+/*******************************************************************************
  * Platform specific page table and MMU setup constants
  ******************************************************************************/
 #define PLAT_PHY_ADDR_SPACE_SIZE	(ULL(1) << 33)
diff --git a/plat/st/stm32mp2/include/stm32mp2_private.h b/plat/st/stm32mp2/include/stm32mp2_private.h
index e1403d2..2ab5001 100644
--- a/plat/st/stm32mp2/include/stm32mp2_private.h
+++ b/plat/st/stm32mp2/include/stm32mp2_private.h
@@ -7,6 +7,10 @@
 #ifndef STM32MP2_PRIVATE_H
 #define STM32MP2_PRIVATE_H
 
+void configure_mmu(void);
+
+uint32_t stm32mp_syscfg_get_chip_dev_id(void);
+
 /* Wrappers for OTP / BSEC functions */
 static inline uint32_t stm32_otp_probe(void)
 {
diff --git a/plat/st/stm32mp2/plat_bl2_mem_params_desc.c b/plat/st/stm32mp2/plat_bl2_mem_params_desc.c
index 630cc84..0ef522e 100644
--- a/plat/st/stm32mp2/plat_bl2_mem_params_desc.c
+++ b/plat/st/stm32mp2/plat_bl2_mem_params_desc.c
@@ -1,10 +1,14 @@
 /*
- * Copyright (c) 2023, STMicroelectronics - All Rights Reserved
+ * Copyright (c) 2023-2024, STMicroelectronics - All Rights Reserved
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
+#include <common/bl_common.h>
 #include <common/desc_image_load.h>
+#include <plat/common/platform.h>
+
+#include <platform_def.h>
 
 /*******************************************************************************
  * Following descriptor provides BL image/ep information that gets used
@@ -15,6 +19,21 @@
  * the next executable image id.
  ******************************************************************************/
 static bl_mem_params_node_t bl2_mem_params_descs[] = {
+	/* Fill FW_CONFIG related information if it exists */
+	{
+		.image_id = FW_CONFIG_ID,
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+				      VERSION_2, entry_point_info_t,
+				      SECURE | NON_EXECUTABLE),
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+				      VERSION_2, image_info_t,
+				      IMAGE_ATTRIB_PLAT_SETUP),
+
+		.image_info.image_base = STM32MP_FW_CONFIG_BASE,
+		.image_info.image_max_size = STM32MP_FW_CONFIG_MAX_SIZE,
+
+		.next_handoff_image_id = INVALID_IMAGE_ID,
+	},
 };
 
 REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
diff --git a/plat/st/stm32mp2/platform.mk b/plat/st/stm32mp2/platform.mk
index d9a4d79..eacbd96 100644
--- a/plat/st/stm32mp2/platform.mk
+++ b/plat/st/stm32mp2/platform.mk
@@ -13,6 +13,7 @@
 CRASH_REPORTING			:=	1
 ENABLE_PIE			:=	1
 PROGRAMMABLE_RESET_ADDRESS	:=	1
+BL2_IN_XIP_MEM			:=	1
 
 # Default Device tree
 DTB_FILE_NAME			?=	stm32mp257f-ev1.dtb
@@ -24,7 +25,7 @@
 STM32_HEADER_VERSION_MINOR	:=	2
 
 # Set load address for serial boot devices
-DWL_BUFFER_BASE 	?=	0x87000000
+DWL_BUFFER_BASE 		?=	0x87000000
 
 # Device tree
 BL2_DTSI			:=	stm32mp25-bl2.dtsi
@@ -35,9 +36,32 @@
 STM32_LD_FILE			:=	plat/st/stm32mp2/${ARCH}/stm32mp2.ld.S
 STM32_BINARY_MAPPING		:=	plat/st/stm32mp2/${ARCH}/stm32mp2.S
 
+STM32MP_FW_CONFIG_NAME		:=	$(patsubst %.dtb,%-fw-config.dtb,$(DTB_FILE_NAME))
+STM32MP_FW_CONFIG		:=	${BUILD_PLAT}/fdts/$(STM32MP_FW_CONFIG_NAME)
+FDT_SOURCES			+=	$(addprefix fdts/, $(patsubst %.dtb,%.dts,$(STM32MP_FW_CONFIG_NAME)))
+# Add the FW_CONFIG to FIP and specify the same to certtool
+$(eval $(call TOOL_ADD_PAYLOAD,${STM32MP_FW_CONFIG},--fw-config))
+
+# Enable flags for C files
+$(eval $(call assert_booleans,\
+	$(sort \
+		STM32MP25 \
+)))
+
+$(eval $(call assert_numerics,\
+	$(sort \
+		PLAT_PARTITION_MAX_ENTRIES \
+		STM32_HEADER_VERSION_MAJOR \
+		STM32_TF_A_COPIES \
+)))
+
 $(eval $(call add_defines,\
 	$(sort \
 		DWL_BUFFER_BASE \
+		PLAT_PARTITION_MAX_ENTRIES \
+		PLAT_TBBR_IMG_DEF \
+		STM32_TF_A_COPIES \
+		STM32MP25 \
 )))
 
 # STM32MP2x is based on Cortex-A35, which is Armv8.0, and does not support BTI
@@ -51,17 +75,28 @@
 PLAT_BL_COMMON_SOURCES		+=	drivers/st/uart/${ARCH}/stm32_console.S
 PLAT_BL_COMMON_SOURCES		+=	plat/st/stm32mp2/${ARCH}/stm32mp2_helper.S
 
+PLAT_BL_COMMON_SOURCES		+=	plat/st/stm32mp2/stm32mp2_private.c
+
 PLAT_BL_COMMON_SOURCES		+=	drivers/st/bsec/bsec3.c					\
-					drivers/st/reset/stm32mp2_reset.c
+					drivers/st/reset/stm32mp2_reset.c			\
+					plat/st/stm32mp2/stm32mp2_syscfg.c
 
 PLAT_BL_COMMON_SOURCES		+=	drivers/st/clk/clk-stm32-core.c				\
 					drivers/st/clk/clk-stm32mp2.c
 
 BL2_SOURCES			+=	plat/st/stm32mp2/plat_bl2_mem_params_desc.c
+
 BL2_SOURCES			+=	plat/st/stm32mp2/bl2_plat_setup.c
 
+ifneq ($(filter 1,${STM32MP_EMMC} ${STM32MP_SDMMC}),)
+BL2_SOURCES			+=	drivers/st/mmc/stm32_sdmmc2.c
+endif
+
 ifeq (${STM32MP_USB_PROGRAMMER},1)
 BL2_SOURCES			+=	plat/st/stm32mp2/stm32mp2_usb_dfu.c
 endif
 
+BL2_SOURCES			+=	drivers/st/ddr/stm32mp2_ddr_helpers.c
+
+# Compilation rules
 include plat/st/common/common_rules.mk
diff --git a/plat/st/stm32mp2/stm32mp2_def.h b/plat/st/stm32mp2/stm32mp2_def.h
index d65fcea..81fdaae 100644
--- a/plat/st/stm32mp2/stm32mp2_def.h
+++ b/plat/st/stm32mp2/stm32mp2_def.h
@@ -12,6 +12,10 @@
 #include <drivers/st/bsec.h>
 #endif
 #include <drivers/st/stm32mp25_rcc.h>
+#ifndef __ASSEMBLER__
+#include <drivers/st/stm32mp2_clk.h>
+#endif
+#include <drivers/st/stm32mp2_pwr.h>
 #include <dt-bindings/clock/stm32mp25-clks.h>
 #include <dt-bindings/clock/stm32mp25-clksrc.h>
 #include <dt-bindings/gpio/stm32-gpio.h>
@@ -26,14 +30,48 @@
 #endif
 
 /*******************************************************************************
+ * CHIP ID
+ ******************************************************************************/
+#define STM32MP2_CHIP_ID			U(0x505)
+
+#define STM32MP251A_PART_NB			U(0x400B3E6D)
+#define STM32MP251C_PART_NB			U(0x000B306D)
+#define STM32MP251D_PART_NB			U(0xC00B3E6D)
+#define STM32MP251F_PART_NB			U(0x800B306D)
+#define STM32MP253A_PART_NB			U(0x400B3E0C)
+#define STM32MP253C_PART_NB			U(0x000B300C)
+#define STM32MP253D_PART_NB			U(0xC00B3E0C)
+#define STM32MP253F_PART_NB			U(0x800B300C)
+#define STM32MP255A_PART_NB			U(0x40082E00)
+#define STM32MP255C_PART_NB			U(0x00082000)
+#define STM32MP255D_PART_NB			U(0xC0082E00)
+#define STM32MP255F_PART_NB			U(0x80082000)
+#define STM32MP257A_PART_NB			U(0x40002E00)
+#define STM32MP257C_PART_NB			U(0x00002000)
+#define STM32MP257D_PART_NB			U(0xC0002E00)
+#define STM32MP257F_PART_NB			U(0x80002000)
+
+#define STM32MP2_REV_A				U(0x08)
+#define STM32MP2_REV_B				U(0x10)
+#define STM32MP2_REV_X				U(0x12)
+#define STM32MP2_REV_Y				U(0x11)
+#define STM32MP2_REV_Z				U(0x09)
+
+/*******************************************************************************
+ * PACKAGE ID
+ ******************************************************************************/
+#define STM32MP25_PKG_CUSTOM			U(0)
+#define STM32MP25_PKG_AL_VFBGA361		U(1)
+#define STM32MP25_PKG_AK_VFBGA424		U(3)
+#define STM32MP25_PKG_AI_TFBGA436		U(5)
+#define STM32MP25_PKG_UNKNOWN			U(7)
+
+/*******************************************************************************
  * STM32MP2 memory map related constants
  ******************************************************************************/
 #define STM32MP_SYSRAM_BASE			U(0x0E000000)
 #define STM32MP_SYSRAM_SIZE			U(0x00040000)
 
-#define STM32MP_SEC_SYSRAM_BASE			STM32MP_SYSRAM_BASE
-#define STM32MP_SEC_SYSRAM_SIZE			STM32MP_SYSRAM_SIZE
-
 /* DDR configuration */
 #define STM32MP_DDR_BASE			U(0x80000000)
 #define STM32MP_DDR_MAX_SIZE			UL(0x100000000)	/* Max 4GB */
@@ -49,28 +87,38 @@
 
 /* Section used inside TF binaries */
 #define STM32MP_PARAM_LOAD_SIZE			U(0x00002400) /* 9 KB for param */
-/* 512 Octets reserved for header */
+/* 512 Bytes reserved for header */
 #define STM32MP_HEADER_SIZE			U(0x00000200)
-#define STM32MP_HEADER_BASE			(STM32MP_SEC_SYSRAM_BASE +	\
+#define STM32MP_HEADER_BASE			(STM32MP_SYSRAM_BASE +	\
 						 STM32MP_PARAM_LOAD_SIZE)
 
 /* round_up(STM32MP_PARAM_LOAD_SIZE + STM32MP_HEADER_SIZE, PAGE_SIZE) */
 #define STM32MP_HEADER_RESERVED_SIZE		U(0x3000)
 
-#define STM32MP_BINARY_BASE			(STM32MP_SEC_SYSRAM_BASE +	\
+#define STM32MP_BINARY_BASE			(STM32MP_SYSRAM_BASE +	\
 						 STM32MP_PARAM_LOAD_SIZE +	\
 						 STM32MP_HEADER_SIZE)
 
-#define STM32MP_BINARY_SIZE			(STM32MP_SEC_SYSRAM_SIZE -	\
+#define STM32MP_BINARY_SIZE			(STM32MP_SYSRAM_SIZE -	\
 						 (STM32MP_PARAM_LOAD_SIZE +	\
 						  STM32MP_HEADER_SIZE))
 
-#define STM32MP_BL2_SIZE			U(0x0002A000) /* 168 KB for BL2 */
+#define STM32MP_BL2_RO_SIZE			U(0x00020000) /* 128 KB */
+#define STM32MP_BL2_SIZE			U(0x00029000) /* 164 KB for BL2 */
 
-#define STM32MP_BL2_BASE			(STM32MP_SEC_SYSRAM_BASE + \
-						 STM32MP_SEC_SYSRAM_SIZE - \
+#define STM32MP_BL2_BASE			(STM32MP_SYSRAM_BASE + \
+						 STM32MP_SYSRAM_SIZE - \
 						 STM32MP_BL2_SIZE)
 
+#define STM32MP_BL2_RO_BASE			STM32MP_BL2_BASE
+
+#define STM32MP_BL2_RW_BASE			(STM32MP_BL2_RO_BASE + \
+						 STM32MP_BL2_RO_SIZE)
+
+#define STM32MP_BL2_RW_SIZE			(STM32MP_SYSRAM_BASE + \
+						 STM32MP_SYSRAM_SIZE - \
+						 STM32MP_BL2_RW_BASE)
+
 /* BL2 and BL32/sp_min require 4 tables */
 #define MAX_XLAT_TABLES				U(4)	/* 16 KB for mapping */
 
@@ -81,13 +129,30 @@
 #define MAX_MMAP_REGIONS			6
 
 /* DTB initialization value */
-#define STM32MP_BL2_DTB_SIZE			U(0x00005000) /* 20 KB for DTB */
+#define STM32MP_BL2_DTB_SIZE			U(0x00006000)	/* 24 KB for DTB */
 
 #define STM32MP_BL2_DTB_BASE			(STM32MP_BL2_BASE - \
 						 STM32MP_BL2_DTB_SIZE)
 
+#if defined(IMAGE_BL2)
+#define STM32MP_DTB_SIZE			STM32MP_BL2_DTB_SIZE
+#define STM32MP_DTB_BASE			STM32MP_BL2_DTB_BASE
+#endif
+
+#define STM32MP_FW_CONFIG_MAX_SIZE		PAGE_SIZE
+#define STM32MP_FW_CONFIG_BASE			STM32MP_SYSRAM_BASE
+
 #define STM32MP_BL33_BASE			(STM32MP_DDR_BASE + U(0x04000000))
 #define STM32MP_BL33_MAX_SIZE			U(0x400000)
+#define STM32MP_HW_CONFIG_BASE			(STM32MP_BL33_BASE + \
+						STM32MP_BL33_MAX_SIZE)
+#define STM32MP_HW_CONFIG_MAX_SIZE		U(0x40000)
+
+/*******************************************************************************
+ * STM32MP2 device/io map related constants (used for MMU)
+ ******************************************************************************/
+#define STM32MP_DEVICE_BASE			U(0x40000000)
+#define STM32MP_DEVICE_SIZE			U(0x40000000)
 
 /*******************************************************************************
  * STM32MP2 RCC
@@ -172,6 +237,7 @@
 
 /* OTP labels */
 #define PART_NUMBER_OTP				"part-number-otp"
+#define REVISION_OTP				"rev_otp"
 #define PACKAGE_OTP				"package-otp"
 #define HCONF1_OTP				"otp124"
 #define NAND_OTP				"otp16"
@@ -314,6 +380,7 @@
 #define DT_DDR_COMPAT				"st,stm32mp2-ddr"
 #define DT_PWR_COMPAT				"st,stm32mp25-pwr"
 #define DT_RCC_CLK_COMPAT			"st,stm32mp25-rcc"
+#define DT_SDMMC2_COMPAT			"st,stm32mp25-sdmmc2"
 #define DT_UART_COMPAT				"st,stm32h7-uart"
 
 #endif /* STM32MP2_DEF_H */
diff --git a/plat/st/stm32mp2/stm32mp2_private.c b/plat/st/stm32mp2/stm32mp2_private.c
new file mode 100644
index 0000000..2801a7f
--- /dev/null
+++ b/plat/st/stm32mp2/stm32mp2_private.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2023-2024, STMicroelectronics - All Rights Reserved
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <platform_def.h>
+
+#define BKPR_BOOT_MODE	96U
+
+#define MAP_SYSRAM	MAP_REGION_FLAT(STM32MP_SYSRAM_BASE, \
+					STM32MP_SYSRAM_SIZE, \
+					MT_MEMORY | \
+					MT_RW | \
+					MT_SECURE | \
+					MT_EXECUTE_NEVER)
+
+#define MAP_DEVICE	MAP_REGION_FLAT(STM32MP_DEVICE_BASE, \
+					STM32MP_DEVICE_SIZE, \
+					MT_DEVICE | \
+					MT_RW | \
+					MT_SECURE | \
+					MT_EXECUTE_NEVER)
+
+#if defined(IMAGE_BL2)
+static const mmap_region_t stm32mp2_mmap[] = {
+	MAP_SYSRAM,
+	MAP_DEVICE,
+	{0}
+};
+#endif
+
+void configure_mmu(void)
+{
+	mmap_add(stm32mp2_mmap);
+	init_xlat_tables();
+
+	enable_mmu_el3(0);
+}
+
+uintptr_t stm32_get_gpio_bank_base(unsigned int bank)
+{
+	if (bank == GPIO_BANK_Z) {
+		return GPIOZ_BASE;
+	}
+
+	assert(bank <= GPIO_BANK_K);
+
+	return GPIOA_BASE + (bank * GPIO_BANK_OFFSET);
+}
+
+uint32_t stm32_get_gpio_bank_offset(unsigned int bank)
+{
+	if (bank == GPIO_BANK_Z) {
+		return 0;
+	}
+
+	assert(bank <= GPIO_BANK_K);
+
+	return bank * GPIO_BANK_OFFSET;
+}
+
+unsigned long stm32_get_gpio_bank_clock(unsigned int bank)
+{
+	if (bank == GPIO_BANK_Z) {
+		return CK_BUS_GPIOZ;
+	}
+
+	assert(bank <= GPIO_BANK_K);
+
+	return CK_BUS_GPIOA + (bank - GPIO_BANK_A);
+}
+
+uint32_t stm32mp_get_chip_version(void)
+{
+	static uint32_t rev;
+
+	if (rev != 0U) {
+		return rev;
+	}
+
+	if (stm32_get_otp_value(REVISION_OTP, &rev) != 0) {
+		panic();
+	}
+
+	return rev;
+}
+
+uint32_t stm32mp_get_chip_dev_id(void)
+{
+	return stm32mp_syscfg_get_chip_dev_id();
+}
+
+static uint32_t get_part_number(void)
+{
+	static uint32_t part_number;
+
+	if (part_number != 0U) {
+		return part_number;
+	}
+
+	if (stm32_get_otp_value(PART_NUMBER_OTP, &part_number) != 0) {
+		panic();
+	}
+
+	return part_number;
+}
+
+static uint32_t get_cpu_package(void)
+{
+	static uint32_t package = UINT32_MAX;
+
+	if (package == UINT32_MAX) {
+		if (stm32_get_otp_value(PACKAGE_OTP, &package) != 0) {
+			panic();
+		}
+	}
+
+	return (package & PACKAGE_OTP_PKG_MASK) >> PACKAGE_OTP_PKG_SHIFT;
+}
+
+void stm32mp_get_soc_name(char name[STM32_SOC_NAME_SIZE])
+{
+	char *cpu_s, *cpu_r, *pkg;
+
+	/* MPUs Part Numbers */
+	switch (get_part_number()) {
+	case STM32MP251A_PART_NB:
+		cpu_s = "251A";
+		break;
+	case STM32MP251C_PART_NB:
+		cpu_s = "251C";
+		break;
+	case STM32MP251D_PART_NB:
+		cpu_s = "251D";
+		break;
+	case STM32MP251F_PART_NB:
+		cpu_s = "251F";
+		break;
+	case STM32MP253A_PART_NB:
+		cpu_s = "253A";
+		break;
+	case STM32MP253C_PART_NB:
+		cpu_s = "253C";
+		break;
+	case STM32MP253D_PART_NB:
+		cpu_s = "253D";
+		break;
+	case STM32MP253F_PART_NB:
+		cpu_s = "253F";
+		break;
+	case STM32MP255A_PART_NB:
+		cpu_s = "255A";
+		break;
+	case STM32MP255C_PART_NB:
+		cpu_s = "255C";
+		break;
+	case STM32MP255D_PART_NB:
+		cpu_s = "255D";
+		break;
+	case STM32MP255F_PART_NB:
+		cpu_s = "255F";
+		break;
+	case STM32MP257A_PART_NB:
+		cpu_s = "257A";
+		break;
+	case STM32MP257C_PART_NB:
+		cpu_s = "257C";
+		break;
+	case STM32MP257D_PART_NB:
+		cpu_s = "257D";
+		break;
+	case STM32MP257F_PART_NB:
+		cpu_s = "257F";
+		break;
+	default:
+		cpu_s = "????";
+		break;
+	}
+
+	/* Package */
+	switch (get_cpu_package()) {
+	case STM32MP25_PKG_CUSTOM:
+		pkg = "XX";
+		break;
+	case STM32MP25_PKG_AL_VFBGA361:
+		pkg = "AL";
+		break;
+	case STM32MP25_PKG_AK_VFBGA424:
+		pkg = "AK";
+		break;
+	case STM32MP25_PKG_AI_TFBGA436:
+		pkg = "AI";
+		break;
+	default:
+		pkg = "??";
+		break;
+	}
+
+	/* REVISION */
+	switch (stm32mp_get_chip_version()) {
+	case STM32MP2_REV_A:
+		cpu_r = "A";
+		break;
+	case STM32MP2_REV_B:
+		cpu_r = "B";
+		break;
+	case STM32MP2_REV_X:
+		cpu_r = "X";
+		break;
+	case STM32MP2_REV_Y:
+		cpu_r = "Y";
+		break;
+	case STM32MP2_REV_Z:
+		cpu_r = "Z";
+		break;
+	default:
+		cpu_r = "?";
+		break;
+	}
+
+	snprintf(name, STM32_SOC_NAME_SIZE,
+		 "STM32MP%s%s Rev.%s", cpu_s, pkg, cpu_r);
+}
+
+void stm32mp_print_cpuinfo(void)
+{
+	char name[STM32_SOC_NAME_SIZE];
+
+	stm32mp_get_soc_name(name);
+	NOTICE("CPU: %s\n", name);
+}
+
+void stm32mp_print_boardinfo(void)
+{
+	uint32_t board_id = 0U;
+
+	if (stm32_get_otp_value(BOARD_ID_OTP, &board_id) != 0) {
+		return;
+	}
+
+	if (board_id != 0U) {
+		stm32_display_board_info(board_id);
+	}
+}
+
+uintptr_t stm32_get_bkpr_boot_mode_addr(void)
+{
+	return tamp_bkpr(BKPR_BOOT_MODE);
+}
diff --git a/plat/st/stm32mp2/stm32mp2_syscfg.c b/plat/st/stm32mp2/stm32mp2_syscfg.c
new file mode 100644
index 0000000..46c75a6
--- /dev/null
+++ b/plat/st/stm32mp2/stm32mp2_syscfg.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2024, STMicroelectronics - All Rights Reserved
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+
+#include <platform_def.h>
+#include <stm32mp2_private.h>
+
+/*
+ * SYSCFG register offsets (base relative)
+ */
+#define SYSCFG_DEVICEID			0x6400U
+
+/*
+ * SYSCFG_DEVICEID Register
+ */
+#define SYSCFG_DEVICEID_DEV_ID_MASK	GENMASK_32(11, 0)
+
+/*
+ * @brief  Get device ID from SYSCFG registers.
+ * @retval device ID (DEV_ID).
+ */
+uint32_t stm32mp_syscfg_get_chip_dev_id(void)
+{
+	return mmio_read_32(SYSCFG_BASE + SYSCFG_DEVICEID) & SYSCFG_DEVICEID_DEV_ID_MASK;
+}
diff --git a/plat/xilinx/common/include/pm_common.h b/plat/xilinx/common/include/pm_common.h
index c38cdef..68d1db2 100644
--- a/plat/xilinx/common/include/pm_common.h
+++ b/plat/xilinx/common/include/pm_common.h
@@ -18,7 +18,6 @@
 
 #if IPI_CRC_CHECK
 #define PAYLOAD_ARG_CNT         8U
-#define RET_PAYLOAD_ARG_CNT	7U
 #define IPI_W0_TO_W6_SIZE       28U
 #define PAYLOAD_CRC_POS         7U
 #define CRC_INIT_VALUE          0x4F4EU
@@ -26,8 +25,8 @@
 #define CRC_POLYNOM             0x8005U
 #else
 #define PAYLOAD_ARG_CNT		7U
-#define RET_PAYLOAD_ARG_CNT	6U
 #endif
+#define RET_PAYLOAD_ARG_CNT	6U
 #define PAYLOAD_ARG_SIZE	4U	/* size in bytes */
 
 #define TZ_VERSION_MAJOR	1
diff --git a/plat/xilinx/common/pm_service/pm_svc_main.c b/plat/xilinx/common/pm_service/pm_svc_main.c
index b431a6c..193c5dc 100644
--- a/plat/xilinx/common/pm_service/pm_svc_main.c
+++ b/plat/xilinx/common/pm_service/pm_svc_main.c
@@ -503,8 +503,8 @@
 				  void *handle, uint32_t security_flag)
 {
 	enum pm_ret_status ret;
-	uint32_t buf[PAYLOAD_ARG_CNT] = {0};
-	uint32_t payload[PAYLOAD_ARG_CNT] = {0};
+	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0U};
+	uint32_t payload[PAYLOAD_ARG_CNT] = {0U};
 	uint32_t module_id;
 
 	module_id = (api_id & MODULE_ID_MASK) >> 8U;
@@ -514,7 +514,7 @@
 			 pm_arg[4], pm_arg[5]);
 
 	ret = pm_ipi_send_sync(primary_proc, payload, (uint32_t *)buf,
-			       PAYLOAD_ARG_CNT);
+			       RET_PAYLOAD_ARG_CNT);
 
 	SMC_RET4(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
 		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U),
diff --git a/services/spd/pncd/pncd_common.c b/services/spd/pncd/pncd_common.c
index 6fdb629..8e89491 100644
--- a/services/spd/pncd/pncd_common.c
+++ b/services/spd/pncd/pncd_common.c
@@ -67,8 +67,9 @@
 	/* Apply the Secure EL1 system register context and switch to it */
 	assert(cm_get_context(SECURE) == &pnc_ctx->cpu_ctx);
 	cm_el1_sysregs_context_restore(SECURE);
+
 #if CTX_INCLUDE_FPREGS
-	fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE)));
+	simd_ctx_restore(SECURE);
 #endif
 	cm_set_next_eret_context(SECURE);
 
@@ -90,8 +91,9 @@
 	/* Save the Secure EL1 system register context */
 	assert(cm_get_context(SECURE) == &pnc_ctx->cpu_ctx);
 	cm_el1_sysregs_context_save(SECURE);
+
 #if CTX_INCLUDE_FPREGS
-	fpregs_context_save(get_fpregs_ctx(cm_get_context(SECURE)));
+	simd_ctx_save(SECURE, false);
 #endif
 
 	assert(pnc_ctx->c_rt_ctx != 0);
diff --git a/services/spd/pncd/pncd_main.c b/services/spd/pncd/pncd_main.c
index 99c4aa1..cc1c1f2 100644
--- a/services/spd/pncd/pncd_main.c
+++ b/services/spd/pncd/pncd_main.c
@@ -55,8 +55,9 @@
 	assert(sec_state_is_valid(security_state));
 
 	cm_el1_sysregs_context_save((uint32_t) security_state);
+
 #if CTX_INCLUDE_FPREGS
-	fpregs_context_save(get_fpregs_ctx(cm_get_context(security_state)));
+	simd_ctx_save((uint32_t)security_state, false);
 #endif
 }
 
@@ -72,8 +73,9 @@
 
 	/* Restore state */
 	cm_el1_sysregs_context_restore((uint32_t) security_state);
+
 #if CTX_INCLUDE_FPREGS
-	fpregs_context_restore(get_fpregs_ctx(cm_get_context(security_state)));
+	simd_ctx_restore((uint32_t)security_state);
 #endif
 
 	cm_set_next_eret_context((uint32_t) security_state);
diff --git a/services/spd/trusty/trusty.c b/services/spd/trusty/trusty.c
index f2048a3..aae2d9a 100644
--- a/services/spd/trusty/trusty.c
+++ b/services/spd/trusty/trusty.c
@@ -118,8 +118,10 @@
 	 * when it's needed the PSCI caller has preserved FP context before
 	 * going here.
 	 */
-	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
-		fpregs_context_save(get_fpregs_ctx(cm_get_context(security_state)));
+	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME) {
+		simd_ctx_save(security_state, false);
+	}
+
 	cm_el1_sysregs_context_save(security_state);
 
 	ctx->saved_security_state = security_state;
@@ -128,8 +130,9 @@
 	assert(ctx->saved_security_state == ((security_state == 0U) ? 1U : 0U));
 
 	cm_el1_sysregs_context_restore(security_state);
-	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
-		fpregs_context_restore(get_fpregs_ctx(cm_get_context(security_state)));
+	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME) {
+		simd_ctx_restore(security_state);
+	}
 
 	cm_set_next_eret_context(security_state);
 
@@ -320,7 +323,7 @@
 	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
 	assert(ep_info != NULL);
 
-	fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
+	simd_ctx_save(NON_SECURE, false);
 	cm_el1_sysregs_context_save(NON_SECURE);
 
 	cm_set_context(&ctx->cpu_ctx, SECURE);
@@ -337,7 +340,7 @@
 	}
 
 	cm_el1_sysregs_context_restore(SECURE);
-	fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE)));
+	simd_ctx_restore(SECURE);
 	cm_set_next_eret_context(SECURE);
 
 	ctx->saved_security_state = ~0U; /* initial saved state is invalid */
@@ -346,7 +349,7 @@
 	(void)trusty_context_switch_helper(&ctx->saved_sp, &zero_args);
 
 	cm_el1_sysregs_context_restore(NON_SECURE);
-	fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
+	simd_ctx_restore(NON_SECURE);
 	cm_set_next_eret_context(NON_SECURE);
 
 	return 1;
diff --git a/services/std_svc/errata_abi/cpu_errata_info.h b/services/std_svc/errata_abi/cpu_errata_info.h
index 61e1076..d688431 100644
--- a/services/std_svc/errata_abi/cpu_errata_info.h
+++ b/services/std_svc/errata_abi/cpu_errata_info.h
@@ -8,6 +8,7 @@
 #define ERRATA_CPUSPEC_H
 
 #include <stdint.h>
+#include <arch.h>
 #include <arch_helpers.h>
 
 #if __aarch64__
@@ -31,8 +32,6 @@
 /* Default values for unused memory in the array */
 #define UNDEF_ERRATA		{UINT_MAX, UCHAR_MAX, UCHAR_MAX}
 
-#define EXTRACT_PARTNUM(x)	((x >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
-
 #define RXPX_RANGE(x, y, z)	(((x >= y) && (x <= z)) ? true : false)
 
 /*
diff --git a/services/std_svc/spm/el3_spmc/spmc_setup.c b/services/std_svc/spm/el3_spmc/spmc_setup.c
index e8beae1..4360832 100644
--- a/services/std_svc/spm/el3_spmc/spmc_setup.c
+++ b/services/std_svc/spm/el3_spmc/spmc_setup.c
@@ -322,14 +322,7 @@
 	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), mair_el1,
 		      mmu_cfg_params[MMU_CFG_MAIR]);
 
-	/* Store the initialised SCTLR_EL1 value in the cpu_context */
-#if (ERRATA_SPECULATIVE_AT)
-	write_ctx_reg(get_errata_speculative_at_ctx(ctx),
-		      CTX_ERRATA_SPEC_AT_TCR_EL1, mmu_cfg_params[MMU_CFG_TCR]);
-#else
-	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), tcr_el1,
-		      mmu_cfg_params[MMU_CFG_TCR]);
-#endif /* ERRATA_SPECULATIVE_AT */
+	write_ctx_tcr_el1_reg_errata(ctx, mmu_cfg_params[MMU_CFG_TCR]);
 
 	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), ttbr0_el1,
 		      mmu_cfg_params[MMU_CFG_TTBR0]);
@@ -340,12 +333,7 @@
 	u_register_t sctlr_el1_val;
 
 	/* Setup SCTLR_EL1 */
-#if (ERRATA_SPECULATIVE_AT)
-	sctlr_el1_val = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
-				 CTX_ERRATA_SPEC_AT_SCTLR_EL1);
-#else
-	sctlr_el1_val = read_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1);
-#endif /* ERRATA_SPECULATIVE_AT */
+	sctlr_el1_val = read_ctx_sctlr_el1_reg_errata(ctx);
 
 	sctlr_el1_val |=
 		/*SCTLR_EL1_RES1 |*/
@@ -381,12 +369,7 @@
 	);
 
 	/* Store the initialised SCTLR_EL1 value in the cpu_context */
-#if (ERRATA_SPECULATIVE_AT)
-	write_ctx_reg(get_errata_speculative_at_ctx(ctx),
-		      CTX_ERRATA_SPEC_AT_SCTLR_EL1, sctlr_el1_val);
-#else
-	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1, sctlr_el1_val);
-#endif /* ERRATA_SPECULATIVE_AT */
+	write_ctx_sctlr_el1_reg_errata(ctx, sctlr_el1_val);
 }
 
 static void spmc_el0_sp_setup_system_registers(struct secure_partition_desc *sp,
diff --git a/services/std_svc/spm/spm_mm/spm_mm_main.c b/services/std_svc/spm/spm_mm/spm_mm_main.c
index 1ff7bb7..34e2c00 100644
--- a/services/std_svc/spm/spm_mm/spm_mm_main.c
+++ b/services/std_svc/spm/spm_mm/spm_mm_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -13,6 +13,7 @@
 #include <common/debug.h>
 #include <common/runtime_svc.h>
 #include <lib/el3_runtime/context_mgmt.h>
+#include <lib/el3_runtime/simd_ctx.h>
 #include <lib/smccc.h>
 #include <lib/spinlock.h>
 #include <lib/utils.h>
@@ -190,13 +191,13 @@
 	uint64_t rc;
 	sp_context_t *sp_ptr = &sp_ctx;
 
-#if CTX_INCLUDE_FPREGS
+#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
 	/*
-	 * SP runs to completion, no need to restore FP registers of secure context.
-	 * Save FP registers only for non secure context.
+	 * SP runs to completion, no need to restore FP/SVE registers of secure context.
+	 * Save FP/SVE registers only for non secure context.
 	 */
-	fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
-#endif
+	simd_ctx_save(NON_SECURE, false);
+#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */
 
 	/* Wait until the Secure Partition is idle and set it to busy. */
 	sp_state_wait_switch(sp_ptr, SP_STATE_IDLE, SP_STATE_BUSY);
@@ -216,13 +217,13 @@
 	assert(sp_ptr->state == SP_STATE_BUSY);
 	sp_state_set(sp_ptr, SP_STATE_IDLE);
 
-#if CTX_INCLUDE_FPREGS
+#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
 	/*
-	 * SP runs to completion, no need to save FP registers of secure context.
-	 * Restore only non secure world FP registers.
+	 * SP runs to completion, no need to save FP/SVE registers of secure context.
+	 * Restore only non secure world FP/SVE registers.
 	 */
-	fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
-#endif
+	simd_ctx_restore(NON_SECURE);
+#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */
 
 	return rc;
 }
diff --git a/services/std_svc/spm/spm_mm/spm_mm_setup.c b/services/std_svc/spm/spm_mm/spm_mm_setup.c
index bb9c7a9..de05459 100644
--- a/services/std_svc/spm/spm_mm/spm_mm_setup.c
+++ b/services/std_svc/spm/spm_mm/spm_mm_setup.c
@@ -124,26 +124,13 @@
 
 	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), mair_el1,
 		      mmu_cfg_params[MMU_CFG_MAIR]);
-
-	/* Store the initialised SCTLR_EL1 value in the cpu_context */
-#if (ERRATA_SPECULATIVE_AT)
-	write_ctx_reg(get_errata_speculative_at_ctx(ctx),
-		      CTX_ERRATA_SPEC_AT_TCR_EL1, mmu_cfg_params[MMU_CFG_TCR]);
-#else
-	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), tcr_el1,
-		      mmu_cfg_params[MMU_CFG_TCR]);
-#endif /* ERRATA_SPECULATIVE_AT */
+	write_ctx_tcr_el1_reg_errata(ctx, mmu_cfg_params[MMU_CFG_TCR]);
 
 	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), ttbr0_el1,
 		      mmu_cfg_params[MMU_CFG_TTBR0]);
 
 	/* Setup SCTLR_EL1 */
-#if (ERRATA_SPECULATIVE_AT)
-	sctlr_el1_val = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
-				 CTX_ERRATA_SPEC_AT_SCTLR_EL1);
-#else
-	sctlr_el1_val = read_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1);
-#endif /* ERRATA_SPECULATIVE_AT */
+	sctlr_el1_val = read_ctx_sctlr_el1_reg_errata(ctx);
 
 	sctlr_el1_val |=
 		/*SCTLR_EL1_RES1 |*/
@@ -180,12 +167,7 @@
 	);
 
 	/* Store the initialised SCTLR_EL1 value in the cpu_context */
-#if (ERRATA_SPECULATIVE_AT)
-	write_ctx_reg(get_errata_speculative_at_ctx(ctx),
-		      CTX_ERRATA_SPEC_AT_SCTLR_EL1, sctlr_el1_val);
-#else
-	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1, sctlr_el1_val);
-#endif /* ERRATA_SPECULATIVE_AT */
+	write_ctx_sctlr_el1_reg_errata(ctx, sctlr_el1_val);
 
 	/*
 	 * Setup other system registers
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
index e3d7fbd..0a246f3 100644
--- a/services/std_svc/spmd/spmd_main.c
+++ b/services/std_svc/spmd/spmd_main.c
@@ -215,6 +215,14 @@
 	cm_el2_sysregs_context_save(NON_SECURE);
 #else
 	cm_el1_sysregs_context_save(NON_SECURE);
+
+#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
+	/*
+	 * The hint bit denoting the absence of SVE live state is effectively false
+	 * in this scenario where execution was trapped to EL3 due to FIQ.
+	 */
+	simd_ctx_save(NON_SECURE, false);
+#endif
 #endif
 
 	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
@@ -230,7 +238,14 @@
 	/* Mark current core as handling a secure interrupt. */
 	ctx->secure_interrupt_ongoing = true;
 
+#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
+	simd_ctx_restore(SECURE);
+#endif
 	rc = spmd_spm_core_sync_entry(ctx);
+
+#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
+	simd_ctx_save(SECURE, false);
+#endif
 	if (rc != 0ULL) {
 		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, plat_my_core_pos());
 	}
@@ -241,6 +256,10 @@
 	cm_el2_sysregs_context_restore(NON_SECURE);
 #else
 	cm_el1_sysregs_context_restore(NON_SECURE);
+
+#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
+	simd_ctx_restore(NON_SECURE);
+#endif
 #endif
 	cm_set_next_eret_context(NON_SECURE);
 
@@ -678,6 +697,10 @@
 	cm_el2_sysregs_context_save(secure_state_in);
 #else
 	cm_el1_sysregs_context_save(secure_state_in);
+#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
+	/* Forward the hint bit denoting the absence of SVE live state. */
+	simd_ctx_save(secure_state_in, (!secure_origin && (is_sve_hint_set(flags) == true)));
+#endif
 #endif
 
 	/* Restore outgoing security state */
@@ -685,6 +708,9 @@
 	cm_el2_sysregs_context_restore(secure_state_out);
 #else
 	cm_el1_sysregs_context_restore(secure_state_out);
+#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
+	simd_ctx_restore(secure_state_out);
+#endif
 #endif
 	cm_set_next_eret_context(secure_state_out);