feat(mt8196): add pwr_ctrl module for CPU power management
Implement the pwr_ctrl module for the armv9_0 PM library. It provides
the PSCI power-domain handlers (core on/off, suspend/resume and
cluster/MCUSYS coordination) plus the registration interface for
platform CPU PM and SMP operations, and adds the MT8196 platform
definitions and build options needed to enable it.
Change-Id: I73a7a8a2d0b120b7225c2f323990176397b6e4a5
diff --git a/plat/mediatek/include/lib/pm/mtk_pm.h b/plat/mediatek/include/lib/pm/mtk_pm.h
new file mode 100644
index 0000000..14d005d
--- /dev/null
+++ b/plat/mediatek/include/lib/pm/mtk_pm.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2025, Mediatek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MTK_PM_H
+#define MTK_PM_H
+#include <lib/psci/psci.h>
+
+#if MTK_PUBEVENT_ENABLE
+#include <vendor_pubsub_events.h>
+#endif
+
+#define MTK_CPUPM_E_OK 0
+#define MTK_CPUPM_E_UNKNOWN (-1)
+#define MTK_CPUPM_E_ERR (-2)
+#define MTK_CPUPM_E_FAIL (-3)
+#define MTK_CPUPM_E_NOT_SUPPORT (-4)
+
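+/*
+ * MTK_CPUPM_FN_* flags describe which callbacks a platform driver implements;
+ * they are passed as fn_flags to register_cpu_pm_ops()/register_cpu_smp_ops().
+ */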
+#define MTK_CPUPM_FN_PWR_LOCK_AQUIRE BIT(0)
+#define MTK_CPUPM_FN_INIT BIT(1)
+#define MTK_CPUPM_FN_PWR_STATE_VALID BIT(2)
+#define MTK_CPUPM_FN_PWR_ON_CORE_PREPARE BIT(3)
+#define MTK_CPUPM_FN_SUSPEND_CORE BIT(4)
+#define MTK_CPUPM_FN_RESUME_CORE BIT(5)
+#define MTK_CPUPM_FN_SUSPEND_CLUSTER BIT(6)
+#define MTK_CPUPM_FN_RESUME_CLUSTER BIT(7)
+#define MTK_CPUPM_FN_SUSPEND_MCUSYS BIT(8)
+#define MTK_CPUPM_FN_RESUME_MCUSYS BIT(9)
+#define MTK_CPUPM_FN_CPUPM_GET_PWR_STATE BIT(10)
+#define MTK_CPUPM_FN_SMP_INIT BIT(11)
+#define MTK_CPUPM_FN_SMP_CORE_ON BIT(12)
+#define MTK_CPUPM_FN_SMP_CORE_OFF BIT(13)
+#define MTK_CPUPM_FN_PWR_DOMAIN_POWER_DOWN_WFI BIT(14)
+
+enum mtk_cpupm_pstate {
+ MTK_CPUPM_CORE_ON,
+ MTK_CPUPM_CORE_OFF,
+ MTK_CPUPM_CORE_SUSPEND,
+ MTK_CPUPM_CORE_RESUME,
+ MTK_CPUPM_CLUSTER_SUSPEND,
+ MTK_CPUPM_CLUSTER_RESUME,
+ MTK_CPUPM_MCUSYS_SUSPEND,
+ MTK_CPUPM_MCUSYS_RESUME,
+};
+
+enum mtk_cpu_pm_mode {
+ MTK_CPU_PM_CPUIDLE,
+ MTK_CPU_PM_SMP,
+};
+
+#define MT_IRQ_REMAIN_MAX 32
+#define MT_IRQ_REMAIN_CAT_LOG BIT(31)
+
+struct mt_irqremain {
+ unsigned int count;
+ unsigned int irqs[MT_IRQ_REMAIN_MAX];
+ unsigned int wakeupsrc_cat[MT_IRQ_REMAIN_MAX];
+ unsigned int wakeupsrc[MT_IRQ_REMAIN_MAX];
+};
+
+typedef void (*plat_init_func)(unsigned int, uintptr_t);
+struct plat_pm_smp_ctrl {
+ plat_init_func init;
+ int (*pwr_domain_on)(u_register_t mpidr);
+ void (*pwr_domain_off)(const psci_power_state_t *target_state);
+ void (*pwr_domain_on_finish)(const psci_power_state_t *target_state);
+};
+
+struct plat_pm_pwr_ctrl {
+ void (*pwr_domain_suspend)(const psci_power_state_t *target_state);
+ void (*pwr_domain_on_finish_late)(
+ const psci_power_state_t *target_state);
+ void (*pwr_domain_suspend_finish)(
+ const psci_power_state_t *target_state);
+ int (*validate_power_state)(unsigned int power_state,
+ psci_power_state_t *req_state);
+ void (*get_sys_suspend_power_state)(
+ psci_power_state_t *req_state);
+ __dead2 void (*pwr_domain_pwr_down_wfi)(
+ const psci_power_state_t *req_state);
+};
+
+struct plat_pm_reset_ctrl {
+ __dead2 void (*system_off)(void);
+ __dead2 void (*system_reset)(void);
+ int (*system_reset2)(int is_vendor,
+ int reset_type,
+ u_register_t cookie);
+};
+
+struct mtk_cpu_pm_info {
+ unsigned int cpuid;
+ unsigned int mode;
+};
+
+struct mtk_cpu_pm_state {
+ unsigned int afflv;
+ unsigned int state_id;
+ const psci_power_state_t *raw;
+};
+
+struct mtk_cpupm_pwrstate {
+ struct mtk_cpu_pm_info info;
+ struct mtk_cpu_pm_state pwr;
+};
+
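+/* Hotplug (SMP) callbacks a platform driver registers via register_cpu_smp_ops() */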
+struct mtk_cpu_smp_ops {
+ void (*init)(unsigned int cpu, uintptr_t sec_entrypoint);
+ int (*cpu_pwr_on_prepare)(unsigned int cpu, uintptr_t entry);
+ void (*cpu_on)(const struct mtk_cpupm_pwrstate *state);
+ void (*cpu_off)(const struct mtk_cpupm_pwrstate *state);
+ int (*invoke)(unsigned int funcID, void *priv);
+};
+
+#define CPUPM_PWR_REQ_UID_MAGIC 0x1103BAAD
+
+#ifdef CPU_PM_PWR_REQ_DEBUG
+#define DECLARE_CPUPM_PWR_REQ(var_name)\
+ static struct cpupm_pwr_req var_name = {\
+ .stat.name = #var_name,\
+ .stat.uid = CPUPM_PWR_REQ_UID_MAGIC,\
+ .stat.sta_req = 0,\
+ }
+#else
+#define DECLARE_CPUPM_PWR_REQ(name)\
+ static struct cpupm_pwr_req name = {\
+ .stat.uid = CPUPM_PWR_REQ_UID_MAGIC,\
+ .stat.sta_req = 0,\
+ }
+#endif
+
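+/*
+ * Power-request helpers: declare a request object with DECLARE_CPUPM_PWR_REQ()
+ * and pass it to CPUPM_PWR_REQ_ACQUIRE()/CPUPM_PWR_REQ_RELEASE() together with
+ * the requested power-domain mask (e.g. MT_CPUPM_MCUSYS_REQ). The request is
+ * forwarded to the CPU PM driver through plat_pm_invoke_func().
+ */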
+#define CPUPM_PWR_REQ_ACTIVE(_cpupm_req) ({\
+ int in_ret;\
+ in_ret = plat_pm_invoke_func(MTK_CPU_PM_CPUIDLE,\
+ CPUPM_INVOKE_PWR_REQ_ACTIVE,\
+ &_cpupm_req);\
+ in_ret; })
+
+#define CPUPM_PWR_REQ_ACQUIRE(_cpupm_req, _pm_req) ({\
+ int in_ret;\
+ _cpupm_req.req = _pm_req;\
+ in_ret = plat_pm_invoke_func(MTK_CPU_PM_CPUIDLE,\
+ CPUPM_INVOKE_PWR_REQ_ACQUIRE,\
+ &_cpupm_req);\
+ in_ret; })
+
+#define CPUPM_PWR_REQ_RELEASE(_cpupm_req, _pm_req) ({\
+ int in_ret;\
+ _cpupm_req.req = _pm_req;\
+ in_ret = plat_pm_invoke_func(MTK_CPU_PM_CPUIDLE,\
+ CPUPM_INVOKE_PWR_REQ_RELASE,\
+ &_cpupm_req);\
+ in_ret; })
+
+struct cpupm_pwr_stat_req {
+ unsigned int sta_req;
+ unsigned int uid;
+#ifdef CPU_PM_PWR_REQ_DEBUG
+ const char *name;
+#endif
+};
+
+struct cpupm_pwr_req {
+ unsigned int req;
+ struct cpupm_pwr_stat_req stat;
+};
+
+struct cpupm_invoke_data {
+ union {
+ unsigned int v_u32;
+ struct cpupm_pwr_req *req;
+ } val;
+};
+
+enum cpupm_invoke_func_id {
+ /* Get the mask of currently active (awake) CPUs */
+ CPUPM_INVOKE_WAKED_CPU = 0,
+ CPUPM_INVOKE_PWR_REQ_ACTIVE,
+ CPUPM_INVOKE_PWR_REQ_ACQUIRE,
+ CPUPM_INVOKE_PWR_REQ_RELASE,
+};
+
+#define MT_CPUPM_MCUSYS_REQ (MT_CPUPM_PWR_DOMAIN_MCUSYS | \
+ MT_CPUPM_PWR_DOMAIN_MCUSYS_BY_CLUSTER)
+#define MT_CPUPM_PWR_DOMAIN_CORE BIT(0)
+#define MT_CPUPM_PWR_DOMAIN_PERCORE_DSU BIT(1)
+#define MT_CPUPM_PWR_DOMAIN_PERCORE_DSU_MEM BIT(2)
+#define MT_CPUPM_PWR_DOMAIN_CLUSTER BIT(3)
+#define MT_CPUPM_PWR_DOMAIN_MCUSYS BIT(4)
+#define MT_CPUPM_PWR_DOMAIN_SUSPEND BIT(5)
+#define MT_CPUPM_PWR_DOMAIN_MCUSYS_BY_CLUSTER BIT(6)
+
+enum mt_cpupm_pwr_domain {
+ CPUPM_PWR_ON,
+ CPUPM_PWR_OFF,
+};
+
+#define mtk_pstate_type unsigned int
+
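+/*
+ * CPU-idle/suspend callbacks a platform driver registers via
+ * register_cpu_pm_ops(); they are driven from the PSCI suspend/resume path.
+ */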
+struct mtk_cpu_pm_ops {
+ void (*init)(unsigned int cpu, uintptr_t sec_entrypoint);
+
+ unsigned int (*get_pstate)(enum mt_cpupm_pwr_domain domain,
+ const mtk_pstate_type psci_state,
+ const struct mtk_cpupm_pwrstate *state);
+
+ int (*pwr_state_valid)(unsigned int afflv, unsigned int state);
+
+ void (*cpu_suspend)(const struct mtk_cpupm_pwrstate *state);
+ void (*cpu_resume)(const struct mtk_cpupm_pwrstate *state);
+
+ void (*cluster_suspend)(const struct mtk_cpupm_pwrstate *state);
+ void (*cluster_resume)(const struct mtk_cpupm_pwrstate *state);
+
+ void (*mcusys_suspend)(const struct mtk_cpupm_pwrstate *state);
+ void (*mcusys_resume)(const struct mtk_cpupm_pwrstate *state);
+ int (*pwr_domain_pwr_down_wfi)(unsigned int cpu);
+
+ int (*invoke)(unsigned int funcID, void *priv);
+};
+
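+/*
+ * Registration entry points for the platform drivers. fn_flags is a mask of
+ * MTK_CPUPM_FN_* bits naming the callbacks actually implemented in ops,
+ * e.g. (with a hypothetical my_smp_ops):
+ *
+ * register_cpu_smp_ops(MTK_CPUPM_FN_SMP_INIT | MTK_CPUPM_FN_SMP_CORE_ON,
+ * &my_smp_ops);
+ *
+ * register_cpu_smp_ops() accepts only one registration; a second call fails.
+ */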
+int register_cpu_pm_ops(unsigned int fn_flags, struct mtk_cpu_pm_ops *ops);
+int register_cpu_smp_ops(unsigned int fn_flags, struct mtk_cpu_smp_ops *ops);
+
+struct mt_cpupm_event_data {
+ unsigned int cpuid;
+ unsigned int pwr_domain;
+};
+
+/* Extension event for platform driver */
+#if MTK_PUBEVENT_ENABLE
+/* [PUB_EVENT] Core power on */
+#define MT_CPUPM_SUBCRIBE_EVENT_PWR_ON(_fn) \
+ SUBSCRIBE_TO_EVENT(mt_cpupm_publish_pwr_on, _fn)
+
+/* [PUB_EVENT] Core power off */
+#define MT_CPUPM_SUBCRIBE_EVENT_PWR_OFF(_fn) \
+ SUBSCRIBE_TO_EVENT(mt_cpupm_publish_pwr_off, _fn)
+
+/* [PUB_EVENT] Cluster power on */
+#define MT_CPUPM_SUBCRIBE_CLUSTER_PWR_ON(_fn) \
+ SUBSCRIBE_TO_EVENT(mt_cpupm_publish_afflv_pwr_on, _fn)
+
+/* [PUB_EVENT] Cluster power off */
+#define MT_CPUPM_SUBCRIBE_CLUSTER_PWR_OFF(_fn) \
+ SUBSCRIBE_TO_EVENT(mt_cpupm_publish_afflv_pwr_off, _fn)
+
+/* [PUB_EVENT] Mcusys power on */
+#define MT_CPUPM_SUBCRIBE_MCUSYS_PWR_ON(_fn) \
+ SUBSCRIBE_TO_EVENT(mt_cpupm_publish_afflv_pwr_on, _fn)
+
+/* [PUB_EVENT] Mcusys power off */
+#define MT_CPUPM_SUBCRIBE_MCUSYS_PWR_OFF(_fn) \
+ SUBSCRIBE_TO_EVENT(mt_cpupm_publish_afflv_pwr_off, _fn)
+
+/* [PUB_EVENT] el3 time sync */
+#define MT_CPUPM_SUBCRIBE_EL3_UPTIME_SYNC_WITH_KERNEL(_fn) \
+ SUBSCRIBE_TO_EVENT(el3_uptime_sync_with_kernel, _fn)
+#else
+#define MT_CPUPM_SUBCRIBE_EVENT_PWR_ON(_fn)
+#define MT_CPUPM_SUBCRIBE_EVENT_PWR_OFF(_fn)
+#define MT_CPUPM_SUBCRIBE_CLUSTER_PWR_ON(_fn)
+#define MT_CPUPM_SUBCRIBE_CLUSTER_PWR_OFF(_fn)
+#define MT_CPUPM_SUBCRIBE_MCUSYS_PWR_ON(_fn)
+#define MT_CPUPM_SUBCRIBE_MCUSYS_PWR_OFF(_fn)
+#define MT_CPUPM_SUBCRIBE_EL3_UPTIME_SYNC_WITH_KERNEL(_fn)
+#endif
+
+/*
+ * Definition of the c-state power domain bits:
+ * bit 0: Cluster
+ * bit 1: CPU buck
+ * bit 2: Mcusys
+ * bit 3: Memory
+ * bit 4: System pll
+ * bit 5: System bus
+ * bit 6: SoC 26m/DCXO
+ * bit 7: Vcore buck
+ * bit 8~14: Reserved
+ * bit 15: Suspend
+ */
+#define MT_PLAT_PWR_STATE_CLUSTER 0x0001
+#define MT_PLAT_PWR_STATE_MCUSYS 0x0005
+#define MT_PLAT_PWR_STATE_MCUSYS_BUCK 0x0007
+#define MT_PLAT_PWR_STATE_SYSTEM_MEM 0x000F
+#define MT_PLAT_PWR_STATE_SYSTEM_PLL 0x001F
+#define MT_PLAT_PWR_STATE_SYSTEM_BUS 0x007F
+#define MT_PLAT_PWR_STATE_SYSTEM_VCORE 0x00FF
+#define MT_PLAT_PWR_STATE_SUSPEND 0x80FF
+
+#define IS_MT_PLAT_PWR_STATE(_state, _tar) \
+ (((_state) & (_tar)) == (_tar))
+#define IS_MT_PLAT_PWR_STATE_MCUSYS(state) \
+ IS_MT_PLAT_PWR_STATE(state, MT_PLAT_PWR_STATE_MCUSYS)
+#define IS_MT_PLAT_PWR_STATE_SYSTEM(state) ((state) & 0x7ff8)
+
+#ifdef PLAT_AFFLV_SYSTEM
+#define PLAT_MT_SYSTEM_SUSPEND PLAT_AFFLV_SYSTEM
+#else
+#define PLAT_MT_SYSTEM_SUSPEND PLAT_MAX_OFF_STATE
+#endif
+
+#ifdef PLAT_AFFLV_CLUSTER
+#define PLAT_MT_CPU_SUSPEND_CLUSTER PLAT_AFFLV_CLUSTER
+#else
+#define PLAT_MT_CPU_SUSPEND_CLUSTER PLAT_MAX_RET_STATE
+#endif
+
+#ifdef PLAT_AFFLV_MCUSYS
+#define PLAT_MT_CPU_SUSPEND_MCUSYS PLAT_AFFLV_MCUSYS
+#else
+#define PLAT_MT_CPU_SUSPEND_MCUSYS PLAT_MAX_RET_STATE
+#endif
+
+#define IS_PLAT_SYSTEM_SUSPEND(aff) ((aff) == PLAT_MT_SYSTEM_SUSPEND)
+#define IS_PLAT_SYSTEM_RETENTION(aff) ((aff) >= PLAT_MAX_RET_STATE)
+
+#define IS_PLAT_SUSPEND_ID(stateid) \
+ ((stateid) == MT_PLAT_PWR_STATE_SUSPEND)
+
+#define IS_PLAT_MCUSYSOFF_AFFLV(_afflv) \
+ ((_afflv) >= PLAT_MT_CPU_SUSPEND_MCUSYS)
+
+int plat_pm_ops_setup_pwr(struct plat_pm_pwr_ctrl *ops);
+
+int plat_pm_ops_setup_reset(struct plat_pm_reset_ctrl *ops);
+
+int plat_pm_ops_setup_smp(struct plat_pm_smp_ctrl *ops);
+
+uintptr_t plat_pm_get_warm_entry(void);
+
+int plat_pm_invoke_func(enum mtk_cpu_pm_mode mode, unsigned int id, void *priv);
+
+#endif
diff --git a/plat/mediatek/lib/pm/armv9_0/pwr_ctrl.c b/plat/mediatek/lib/pm/armv9_0/pwr_ctrl.c
new file mode 100644
index 0000000..a0171bf
--- /dev/null
+++ b/plat/mediatek/lib/pm/armv9_0/pwr_ctrl.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2025, Mediatek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#ifdef CONFIG_MTK_BOOKER
+#include <drivers/booker.h>
+#endif
+
+#include <common/debug.h>
+#include <drivers/arm/gicv3.h>
+#include <drivers/console.h>
+#include <lib/psci/psci.h>
+#include <lib/utils.h>
+#include <plat/arm/common/plat_arm.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+#include <lib/mtk_init/mtk_init.h>
+#include <lib/pm/mtk_pm.h>
+#ifdef MTK_PUBEVENT_ENABLE
+#include <vendor_pubsub_events.h>
+#endif
+
+#define IS_AFFLV_PUBEVENT(_pstate) \
+ ((_pstate) & (MT_CPUPM_PWR_DOMAIN_MCUSYS | \
+ MT_CPUPM_PWR_DOMAIN_CLUSTER))
+
+#ifdef MTK_PUBEVENT_ENABLE
+#define MT_CPUPM_EVENT_PWR_ON(x) ({ \
+ PUBLISH_EVENT_ARG(mt_cpupm_publish_pwr_on, \
+ (const void *)(x)); })
+
+#define MT_CPUPM_EVENT_PWR_OFF(x) ({ \
+ PUBLISH_EVENT_ARG(mt_cpupm_publish_pwr_off, \
+ (const void *)(x)); })
+
+#define MT_CPUPM_EVENT_AFFLV_PWR_ON(x) ({ \
+ PUBLISH_EVENT_ARG(mt_cpupm_publish_afflv_pwr_on, \
+ (const void *)(x)); })
+
+#define MT_CPUPM_EVENT_AFFLV_PWR_OFF(x) ({ \
+ PUBLISH_EVENT_ARG(mt_cpupm_publish_afflv_pwr_off, \
+ (const void *)(x)); })
+
+#else
+#define MT_CPUPM_EVENT_PWR_ON(x) ({ (void)x; })
+#define MT_CPUPM_EVENT_PWR_OFF(x) ({ (void)x; })
+#define MT_CPUPM_EVENT_AFFLV_PWR_ON(x) ({ (void)x; })
+#define MT_CPUPM_EVENT_AFFLV_PWR_OFF(x) ({ (void)x; })
+#endif
+
+/*
+ * The CPU's requirement for the cluster power status:
+ * [0] : the CPU requests cluster power down
+ * [1] : the CPU requests cluster power on
+ */
+#define coordinate_cluster(onoff) \
+ write_clusterpwrdn_el1(onoff)
+#define coordinate_cluster_pwron() \
+ coordinate_cluster(1)
+#define coordinate_cluster_pwroff() \
+ coordinate_cluster(0)
+
+/* Default function mask: no MTK_CPUPM_FN_* bit is set until ops are registered */
+#define MTK_CPU_PWR_FN_MASK_DEFAULT (0)
+
+struct mtk_cpu_pwr_ctrl {
+ unsigned int fn_mask;
+ struct mtk_cpu_pm_ops *ops;
+ struct mtk_cpu_smp_ops *smp;
+};
+
+static struct mtk_cpu_pwr_ctrl imtk_cpu_pwr = {
+ .fn_mask = MTK_CPU_PWR_FN_MASK_DEFAULT,
+ .ops = NULL,
+};
+
+#define IS_CPUIDLE_FN_ENABLE(x) (imtk_cpu_pwr.ops && (imtk_cpu_pwr.fn_mask & (x)))
+#define IS_CPUSMP_FN_ENABLE(x) (imtk_cpu_pwr.smp && (imtk_cpu_pwr.fn_mask & (x)))
+
+/* per-cpu power state */
+static unsigned int cpu_power_state[PLATFORM_CORE_COUNT];
+
+#define get_pwr_stateid(cpu) \
+ psci_get_pstate_id(cpu_power_state[cpu])
+
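+/*
+ * Translate the PSCI power state into the MediaTek pstate bitmap
+ * (MT_CPUPM_PWR_DOMAIN_*) via the platform get_pstate callback;
+ * evaluates to 0 when the callback is not registered.
+ */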
+#define GET_MEDIATEK_PSTATE(_domain, _psci_state, _state) ({ \
+ int mret = 0; \
+ if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_CPUPM_GET_PWR_STATE)) \
+ mret = imtk_cpu_pwr.ops->get_pstate( \
+ _domain, _psci_state, _state); \
+ mret; })
+
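+/*
+ * Return the highest affinity level that is not in the RUN state,
+ * or PSCI_INVALID_PWR_LVL if every level is running.
+ */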
+static inline unsigned int get_pwr_afflv(const psci_power_state_t *state)
+{
+ for (int i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+ if (is_local_state_run(state->pwr_domain_state[i]) == 0)
+ return (unsigned int) i;
+ }
+
+ return PSCI_INVALID_PWR_LVL;
+}
+
+static void mcusys_pwr_on_common(const struct mtk_cpupm_pwrstate *state)
+{
+ if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_MCUSYS))
+ imtk_cpu_pwr.ops->mcusys_resume(state);
+}
+
+static void mcusys_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state)
+{
+#ifdef CONFIG_MTK_BOOKER
+ booker_flush();
+#endif
+
+ if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_MCUSYS))
+ imtk_cpu_pwr.ops->mcusys_suspend(state);
+
+}
+
+static void cluster_pwr_on_common(const struct mtk_cpupm_pwrstate *state)
+{
+ if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_CLUSTER))
+ imtk_cpu_pwr.ops->cluster_resume(state);
+}
+
+static void cluster_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state)
+{
+ if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_CLUSTER))
+ imtk_cpu_pwr.ops->cluster_suspend(state);
+}
+
+static void cpu_pwr_on_common(const struct mtk_cpupm_pwrstate *state,
+ unsigned int pstate)
+{
+ coordinate_cluster_pwron();
+
+ gicv3_rdistif_init(plat_my_core_pos());
+ gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+static void cpu_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state,
+ unsigned int pstate)
+{
+ if (pstate & MT_CPUPM_PWR_DOMAIN_PERCORE_DSU)
+ coordinate_cluster_pwroff();
+}
+
+static void cpu_pwr_resume(const struct mtk_cpupm_pwrstate *state,
+ unsigned int pstate)
+{
+ cpu_pwr_on_common(state, pstate);
+ if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_CORE))
+ imtk_cpu_pwr.ops->cpu_resume(state);
+}
+
+static void cpu_pwr_suspend(const struct mtk_cpupm_pwrstate *state,
+ unsigned int pstate)
+{
+ if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_CORE))
+ imtk_cpu_pwr.ops->cpu_suspend(state);
+ cpu_pwr_dwn_common(state, pstate);
+}
+
+static void cpu_pwr_on(const struct mtk_cpupm_pwrstate *state,
+ unsigned int pstate)
+{
+ cpu_pwr_on_common(state, pstate);
+ if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_CORE_ON))
+ imtk_cpu_pwr.smp->cpu_on(state);
+}
+
+static void cpu_pwr_off(const struct mtk_cpupm_pwrstate *state,
+ unsigned int pstate)
+{
+ if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_CORE_OFF))
+ imtk_cpu_pwr.smp->cpu_off(state);
+ cpu_pwr_dwn_common(state, pstate);
+}
+
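+/*
+ * PSCI pwr_domain_on hook: let the platform SMP driver prepare the target
+ * core (warm-boot entry, etc.) before it is powered on.
+ */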
+static int power_domain_on(u_register_t mpidr)
+{
+ int ret = PSCI_E_SUCCESS;
+ int cpu = plat_core_pos_by_mpidr(mpidr);
+ uintptr_t entry = plat_pm_get_warm_entry();
+
+ if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_PWR_ON_CORE_PREPARE)) {
+ int b_ret = MTK_CPUPM_E_FAIL;
+
+ b_ret = imtk_cpu_pwr.smp->cpu_pwr_on_prepare(cpu, entry);
+
+ if (b_ret)
+ ret = PSCI_E_DENIED;
+ }
+ INFO("CPU %u power domain prepare on\n", cpu);
+ return ret;
+}
+
+static void power_domain_on_finish(const psci_power_state_t *state)
+{
+ struct mt_cpupm_event_data nb;
+ unsigned int pstate = (MT_CPUPM_PWR_DOMAIN_CORE |
+ MT_CPUPM_PWR_DOMAIN_PERCORE_DSU);
+ struct mtk_cpupm_pwrstate pm_state = {
+ .info = {
+ .cpuid = plat_my_core_pos(),
+ .mode = MTK_CPU_PM_SMP,
+ },
+ .pwr = {
+ .afflv = get_pwr_afflv(state),
+ .state_id = 0x0,
+ },
+ };
+
+ cpu_pwr_on(&pm_state, pstate);
+
+ nb.cpuid = pm_state.info.cpuid;
+ nb.pwr_domain = pstate;
+ MT_CPUPM_EVENT_PWR_ON(&nb);
+ INFO("CPU %u power domain on finished\n", pm_state.info.cpuid);
+}
+
+static void power_domain_off(const psci_power_state_t *state)
+{
+ struct mt_cpupm_event_data nb;
+ unsigned int pstate = (MT_CPUPM_PWR_DOMAIN_CORE |
+ MT_CPUPM_PWR_DOMAIN_PERCORE_DSU);
+ struct mtk_cpupm_pwrstate pm_state = {
+ .info = {
+ .cpuid = plat_my_core_pos(),
+ .mode = MTK_CPU_PM_SMP,
+ },
+ .pwr = {
+ .afflv = get_pwr_afflv(state),
+ .state_id = 0x0,
+ },
+ };
+
+ cpu_pwr_off(&pm_state, pstate);
+
+ gicv3_rdistif_off(plat_my_core_pos());
+
+ nb.cpuid = pm_state.info.cpuid;
+ nb.pwr_domain = pstate;
+ MT_CPUPM_EVENT_PWR_OFF(&nb);
+
+ INFO("CPU %u power domain off\n", pm_state.info.cpuid);
+}
+
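+/*
+ * PSCI pwr_domain_suspend hook: query the platform pstate for this request,
+ * run the core/cluster/MCUSYS suspend callbacks as selected, turn the GIC
+ * redistributor off and publish the matching power-off events.
+ */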
+static void power_domain_suspend(const psci_power_state_t *state)
+{
+ unsigned int pstate = 0;
+ struct mt_cpupm_event_data nb;
+ struct mtk_cpupm_pwrstate pm_state = {
+ .info = {
+ .cpuid = plat_my_core_pos(),
+ .mode = MTK_CPU_PM_CPUIDLE,
+ },
+ };
+
+ pm_state.pwr.state_id = get_pwr_stateid(pm_state.info.cpuid);
+ pm_state.pwr.afflv = get_pwr_afflv(state);
+ pm_state.pwr.raw = state;
+
+ pstate = GET_MEDIATEK_PSTATE(CPUPM_PWR_OFF,
+ cpu_power_state[pm_state.info.cpuid], &pm_state);
+
+ cpu_pwr_suspend(&pm_state, pstate);
+
+ if (pstate & MT_CPUPM_PWR_DOMAIN_CLUSTER)
+ cluster_pwr_dwn_common(&pm_state);
+
+ if (pstate & MT_CPUPM_PWR_DOMAIN_MCUSYS)
+ mcusys_pwr_dwn_common(&pm_state);
+
+ gicv3_rdistif_off(plat_my_core_pos());
+
+ nb.cpuid = pm_state.info.cpuid;
+ nb.pwr_domain = pstate;
+ MT_CPUPM_EVENT_PWR_OFF(&nb);
+
+ if (IS_AFFLV_PUBEVENT(pstate))
+ MT_CPUPM_EVENT_AFFLV_PWR_OFF(&nb);
+}
+
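+/*
+ * PSCI pwr_domain_suspend_finish hook: resume in the reverse order, i.e.
+ * MCUSYS and cluster first, then the core (which also re-initializes the
+ * GIC redistributor and CPU interface), and publish the power-on events.
+ */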
+static void power_domain_suspend_finish(const psci_power_state_t *state)
+{
+ unsigned int pstate = 0;
+ struct mt_cpupm_event_data nb;
+ struct mtk_cpupm_pwrstate pm_state = {
+ .info = {
+ .cpuid = plat_my_core_pos(),
+ .mode = MTK_CPU_PM_CPUIDLE,
+ },
+ };
+
+ pm_state.pwr.state_id = get_pwr_stateid(pm_state.info.cpuid);
+ pm_state.pwr.afflv = get_pwr_afflv(state);
+ pm_state.pwr.raw = state;
+
+ pstate = GET_MEDIATEK_PSTATE(CPUPM_PWR_ON,
+ cpu_power_state[pm_state.info.cpuid], &pm_state);
+
+ if (pstate & MT_CPUPM_PWR_DOMAIN_MCUSYS)
+ mcusys_pwr_on_common(&pm_state);
+
+ if (pstate & MT_CPUPM_PWR_DOMAIN_CLUSTER)
+ cluster_pwr_on_common(&pm_state);
+
+ cpu_pwr_resume(&pm_state, pstate);
+
+ nb.cpuid = pm_state.info.cpuid;
+ nb.pwr_domain = pstate;
+ MT_CPUPM_EVENT_PWR_ON(&nb);
+
+ if (IS_AFFLV_PUBEVENT(pstate))
+ MT_CPUPM_EVENT_AFFLV_PWR_ON(&nb);
+}
+
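+/*
+ * PSCI validate_power_state hook: let the platform driver check that the
+ * requested composite state is reachable, fill in req_state and record the
+ * raw power_state per CPU for the suspend path.
+ */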
+static int validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ int i;
+ unsigned int pstate = psci_get_pstate_type(power_state);
+ int aff_lvl = psci_get_pstate_pwrlvl(power_state);
+ unsigned int my_core_pos = plat_my_core_pos();
+
+ if (!imtk_cpu_pwr.ops)
+ return PSCI_E_INVALID_PARAMS;
+
+ if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_PWR_STATE_VALID)) {
+ int ret = MTK_CPUPM_E_FAIL;
+
+ ret = imtk_cpu_pwr.ops->pwr_state_valid(aff_lvl, pstate);
+ if (ret)
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ if (pstate == PSTATE_TYPE_STANDBY) {
+ req_state->pwr_domain_state[0] = PLAT_MAX_RET_STATE;
+ } else {
+ for (i = PSCI_CPU_PWR_LVL; i <= aff_lvl; i++)
+ req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+ }
+ cpu_power_state[my_core_pos] = power_state;
+ return PSCI_E_SUCCESS;
+}
+
+#if CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND
+/* Mediatek PSCI power domain */
+static void get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+ int lv = 0;
+ unsigned int my_core_pos = plat_my_core_pos();
+
+ for (lv = PSCI_CPU_PWR_LVL; lv <= PLAT_MAX_PWR_LVL; lv++)
+ req_state->pwr_domain_state[lv] = PLAT_MAX_OFF_STATE;
+
+ cpu_power_state[my_core_pos] = psci_make_powerstate(
+ MT_PLAT_PWR_STATE_SUSPEND,
+ PSTATE_TYPE_POWERDOWN,
+ PLAT_MT_SYSTEM_SUSPEND);
+
+ flush_dcache_range((uintptr_t)&cpu_power_state[my_core_pos],
+ sizeof(cpu_power_state[my_core_pos]));
+}
+#endif
+
+static void pm_smp_init(unsigned int cpu_id, uintptr_t entry_point)
+{
+ if (entry_point == 0) {
+ ERROR("%s, warm_entry_point is null\n", __func__);
+ panic();
+ }
+ if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_INIT))
+ imtk_cpu_pwr.smp->init(cpu_id, entry_point);
+ INFO("[%s:%d] SMP initialization finished\n", __func__, __LINE__);
+}
+
+struct plat_pm_smp_ctrl armv9_0_smp_ops = {
+ .init = pm_smp_init,
+ .pwr_domain_on = power_domain_on,
+ .pwr_domain_off = power_domain_off,
+ .pwr_domain_on_finish = power_domain_on_finish,
+};
+
+#define ISSUE_CPU_PM_REG_FAIL(_success) ({ \
+ _success = 0; assert(0); })
+
+#define CPM_PM_FN_CHECK(_fns, _ops, _id, _func, _cond_ex, _result, _flag) ({ \
+ if ((_fns) & (_id)) { \
+ if ((_ops)->_func && (_cond_ex)) { \
+ _flag |= (_id); \
+ } else { \
+ ISSUE_CPU_PM_REG_FAIL(_result); \
+ } \
+ } })
+
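+/*
+ * Forward a driver-specific request (enum cpupm_invoke_func_id) to the
+ * registered CPU-idle or SMP ops, depending on the selected mode.
+ */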
+int plat_pm_invoke_func(enum mtk_cpu_pm_mode mode, unsigned int id, void *priv)
+{
+ int ret = MTK_CPUPM_E_ERR;
+
+ if ((mode == MTK_CPU_PM_CPUIDLE) && imtk_cpu_pwr.ops &&
+ imtk_cpu_pwr.ops->invoke)
+ ret = imtk_cpu_pwr.ops->invoke(id, priv);
+ else if ((mode == MTK_CPU_PM_SMP) &&
+ imtk_cpu_pwr.smp &&
+ imtk_cpu_pwr.smp->invoke)
+ ret = imtk_cpu_pwr.smp->invoke(id, priv);
+
+ return ret;
+}
+
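+/*
+ * Validate the advertised callbacks against fn_flags, then register the
+ * platform SMP ops and hook the generic armv9_0 handlers into the MTK PM
+ * framework via plat_pm_ops_setup_smp().
+ */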
+int register_cpu_smp_ops(unsigned int fn_flags, struct mtk_cpu_smp_ops *ops)
+{
+ int success = 1;
+ unsigned int fns = 0;
+
+ if (!ops || imtk_cpu_pwr.smp) {
+ ERROR("[%s:%d] register cpu_smp ops failed\n", __func__, __LINE__);
+ return MTK_CPUPM_E_ERR;
+ }
+
+ CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_INIT,
+ init, 1, success, fns);
+
+ CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_ON_CORE_PREPARE,
+ cpu_pwr_on_prepare, 1, success, fns);
+
+ CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_CORE_ON,
+ cpu_on, 1, success, fns);
+
+ CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_CORE_OFF,
+ cpu_off, 1, success, fns);
+
+ if (success) {
+ imtk_cpu_pwr.smp = ops;
+ imtk_cpu_pwr.fn_mask |= fns;
+ plat_pm_ops_setup_smp(&armv9_0_smp_ops);
+ INFO("[%s:%d] CPU SMP ops registered, supported functions:0x%x\n",
+ __func__, __LINE__, fns);
+ } else {
+ ERROR("[%s:%d] register cpu_smp ops failed, fn_flags:0x%x\n",
+ __func__, __LINE__, fn_flags);
+ assert(0);
+ return MTK_CPUPM_E_FAIL;
+ }
+ return MTK_CPUPM_E_OK;
+}
diff --git a/plat/mediatek/lib/pm/armv9_0/rules.mk b/plat/mediatek/lib/pm/armv9_0/rules.mk
index 08a7957..43ffb15 100644
--- a/plat/mediatek/lib/pm/armv9_0/rules.mk
+++ b/plat/mediatek/lib/pm/armv9_0/rules.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2024, MediaTek Inc. All rights reserved.
+# Copyright (c) 2025, MediaTek Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -8,6 +8,6 @@
MODULE := armv${CONFIG_MTK_PM_ARCH}
-LOCAL_SRCS-y :=
+LOCAL_SRCS-y := ${LOCAL_DIR}/pwr_ctrl.c
$(eval $(call MAKE_MODULE,$(MODULE),$(LOCAL_SRCS-y),$(MTK_BL)))
diff --git a/plat/mediatek/lib/pm/mtk_pm.h b/plat/mediatek/lib/pm/mtk_pm.h
index 4a29439..0ee15e9 100644
--- a/plat/mediatek/lib/pm/mtk_pm.h
+++ b/plat/mediatek/lib/pm/mtk_pm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, Mediatek Inc. All rights reserved.
+ * Copyright (c) 2025, Mediatek Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,7 +18,6 @@
#define MTK_CPUPM_E_FAIL (-3)
#define MTK_CPUPM_E_NOT_SUPPORT (-4)
-
#define MTK_CPUPM_FN_PWR_LOCK_AQUIRE BIT(0)
#define MTK_CPUPM_FN_INIT BIT(1)
#define MTK_CPUPM_FN_PWR_STATE_VALID BIT(2)
@@ -170,6 +169,10 @@
#define MT_CPUPM_SUBCRIBE_MCUSYS_PWR_OFF(_fn) \
SUBSCRIBE_TO_EVENT(mt_cpupm_publish_afflv_pwr_off, _fn)
+/* [PUB_EVENT] el3 time sync */
+#define MT_CPUPM_SUBCRIBE_EL3_UPTIME_SYNC_WITH_KERNEL(_fn) \
+ SUBSCRIBE_TO_EVENT(el3_uptime_sync_with_kernel, _fn)
+
#else
#define MT_CPUPM_SUBCRIBE_EVENT_PWR_ON(_fn)
#define MT_CPUPM_SUBCRIBE_EVENT_PWR_OFF(_fn)
@@ -177,6 +180,7 @@
#define MT_CPUPM_SUBCRIBE_CLUSTER_PWR_OFF(_fn)
#define MT_CPUPM_SUBCRIBE_MCUSYS_PWR_ON(_fn)
#define MT_CPUPM_SUBCRIBE_MCUSYS_PWR_OFF(_fn)
+#define MT_CPUPM_SUBCRIBE_EL3_UPTIME_SYNC_WITH_KERNEL(_fn)
#endif
/*
@@ -201,8 +205,10 @@
#define MT_PLAT_PWR_STATE_SYSTEM_BUS (0x0050)
#define MT_PLAT_PWR_STATE_SUSPEND (0x00f0)
-#define IS_MT_PLAT_PWR_STATE(state, target_state) ((state & target_state) == target_state)
-#define IS_MT_PLAT_PWR_STATE_MCUSYS(state) IS_MT_PLAT_PWR_STATE(state, MT_PLAT_PWR_STATE_MCUSYS)
+#define IS_MT_PLAT_PWR_STATE(state, target_state) \
+ (((state) & (target_state)) == (target_state))
+#define IS_MT_PLAT_PWR_STATE_MCUSYS(state) \
+ IS_MT_PLAT_PWR_STATE(state, MT_PLAT_PWR_STATE_MCUSYS)
#define PLAT_MT_SYSTEM_SUSPEND PLAT_MAX_OFF_STATE
#define PLAT_MT_CPU_SUSPEND_CLUSTER PLAT_MAX_RET_STATE
diff --git a/plat/mediatek/mt8196/include/platform_def.h b/plat/mediatek/mt8196/include/platform_def.h
index a19fad7..362f93f 100644
--- a/plat/mediatek/mt8196/include/platform_def.h
+++ b/plat/mediatek/mt8196/include/platform_def.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2024, Mediatek Inc. All rights reserved.
+ * Copyright (c) 2025, Mediatek Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -19,6 +19,9 @@
#define MCUCFG_REG_SIZE (0x50000)
#define IO_PHYS (0x10000000)
+#define MT_UTILITYBUS_BASE (0x0C800000)
+#define MT_UTILITYBUS_SIZE (0x800000)
+
/* Aggregate of all devices for MMU mapping */
#define MTK_DEV_RNG1_BASE (IO_PHYS)
#define MTK_DEV_RNG1_SIZE (0x10000000)
@@ -65,6 +68,20 @@
* SPM related constants
******************************************************************************/
#define SPM_BASE (IO_PHYS + 0x0C004000)
+#define SPM_REG_SIZE (0x1000)
+#define SPM_SRAM_BASE (IO_PHYS + 0x0C00C000)
+#define SPM_SRAM_REG_SIZE (0x1000)
+#define SPM_PBUS_BASE (IO_PHYS + 0x0C00D000)
+#define SPM_PBUS_REG_SIZE (0x1000)
+
+#ifdef SPM_BASE
+#define SPM_EXT_INT_WAKEUP_REQ (SPM_BASE + 0x210)
+#define SPM_EXT_INT_WAKEUP_REQ_SET (SPM_BASE + 0x214)
+#define SPM_EXT_INT_WAKEUP_REQ_CLR (SPM_BASE + 0x218)
+#define SPM_CPU_BUCK_ISO_CON (SPM_BASE + 0xEF8)
+#define SPM_CPU_BUCK_ISO_DEFAUT (0x0)
+#define SPM_AUDIO_PWR_CON (SPM_BASE + 0xE4C)
+#endif
/*******************************************************************************
* GPIO related constants
@@ -237,8 +254,16 @@
#define MAX_MMAP_REGIONS (512)
/*******************************************************************************
+ * CPU_EB TCM handling related constants
+ ******************************************************************************/
+#define CPU_EB_TCM_BASE 0x0C2CF000
+#define CPU_EB_TCM_SIZE 0x1000
+#define CPU_EB_MBOX3_OFFSET 0xFCE0
+#define CPU_EB_TCM_CNT_BASE 0x0C2CC000
+
+/*******************************************************************************
* CPU PM definitions
- *******************************************************************************/
+ ******************************************************************************/
#define PLAT_CPU_PM_B_BUCK_ISO_ID (6)
#define PLAT_CPU_PM_ILDO_ID (6)
#define CPU_IDLE_SRAM_BASE (0x11B000)
diff --git a/plat/mediatek/mt8196/plat_config.mk b/plat/mediatek/mt8196/plat_config.mk
index a983de3..e4a56c8 100644
--- a/plat/mediatek/mt8196/plat_config.mk
+++ b/plat/mediatek/mt8196/plat_config.mk
@@ -35,16 +35,18 @@
CONFIG_MTK_APUSYS_SEC_CTRL := y
CONFIG_MTK_APUSYS_SETUP_CE := y
CONFIG_MTK_MCUSYS := y
-MCUSYS_VERSION := v1
+MCUSYS_VERSION := v4
CONFIG_MTK_PM_SUPPORT := y
CONFIG_MTK_PM_ARCH := 9_0
CONFIG_MTK_CPU_PM_SUPPORT := y
CONFIG_MTK_CPU_PM_ARCH := 5_4
-CONFIG_MTK_SMP_EN := n
-CONFIG_MTK_CPU_SUSPEND_EN := y
+CONFIG_MTK_SMP_EN := y
+CONFIG_MTK_CPU_SUSPEND_EN := n
CONFIG_MTK_SPM_VERSION := mt8196
-CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND := y
+CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND := n
CONFIG_MTK_TINYSYS_VCP := y
+CPU_PWR_TOPOLOGY := group_4_3_1
+CPU_PM_CORE_ARCH64_ONLY := y
CPU_PM_TINYSYS_SUPPORT := y
MTK_PUBEVENT_ENABLE := y
CONFIG_MTK_PMIC := y
diff --git a/plat/mediatek/mt8196/platform.mk b/plat/mediatek/mt8196/platform.mk
index 0432e56..c8bfb47 100644
--- a/plat/mediatek/mt8196/platform.mk
+++ b/plat/mediatek/mt8196/platform.mk
@@ -14,6 +14,7 @@
include lib/xlat_tables_v2/xlat_tables.mk
PLAT_INCLUDES := -I${MTK_PLAT}/common \
+ -I${MTK_PLAT}/drivers/cpu_pm/topology/inc \
-I${MTK_PLAT}/drivers/gpio/ \
-I${MTK_PLAT}/include \
-I${MTK_PLAT}/include/${ARCH_VERSION} \
@@ -33,6 +34,7 @@
MODULES-y += $(MTK_PLAT)/drivers/vcp
MODULES-y += $(MTK_PLAT)/helpers
MODULES-y += $(MTK_PLAT)/topology
+MODULES-$(CONFIG_MTK_CPU_PM_SUPPORT) += $(MTK_PLAT)/drivers/cpu_pm
MODULES-$(CONFIG_MTK_PMIC) += $(MTK_PLAT)/drivers/pmic
MODULES-$(CONFIG_MTK_SPMI) += $(MTK_PLAT)/drivers/spmi