/*
 * Copyright (c) 2022, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdint.h>

#include <lib/spinlock.h>

#include <lib/mtk_init/mtk_init.h>
#include <lib/pm/mtk_pm.h>
#include "mt_cpu_pm.h"
#include "mt_cpu_pm_cpc.h"
#include "mt_cpu_pm_mbox.h"
#include <mt_lp_rm.h>
#include "mt_smp.h"
#include <mtk_mmap_pool.h>
#include <platform_def.h>

/*
 * The bakery lock must be used when the data cache is off, because
 * spinlocks rely on coherent exclusive accesses. When the cache is on,
 * a spinlock gives better performance.
 */
#ifdef MT_CPU_PM_USING_BAKERY_LOCK
DEFINE_BAKERY_LOCK(mt_cpu_pm_lock);
#define plat_cpu_pm_lock_init()	bakery_lock_init(&mt_cpu_pm_lock)
#define plat_cpu_pm_lock()	bakery_lock_get(&mt_cpu_pm_lock)
#define plat_cpu_pm_unlock()	bakery_lock_release(&mt_cpu_pm_lock)
#else
spinlock_t mt_cpu_pm_lock;
#define plat_cpu_pm_lock_init()
#define plat_cpu_pm_lock()	spin_lock(&mt_cpu_pm_lock)
#define plat_cpu_pm_unlock()	spin_unlock(&mt_cpu_pm_lock)
#endif

enum mt_pwr_node {
	MT_PWR_NONMCUSYS = 0,
	MT_PWR_MCUSYS_PDN,
	MT_PWR_SUSPEND,
	MT_PWR_SYSTEM_MEM,
	MT_PWR_SYSTEM_PLL,
	MT_PWR_SYSTEM_BUS,
	MT_PWR_MAX,
};

#define CPU_PM_DEPD_INIT	BIT(0)
#define CPU_PM_DEPD_READY	BIT(1)
#define CPU_PM_PLAT_READY	BIT(2)

#ifdef CPU_PM_TINYSYS_SUPPORT
#define CPU_PM_INIT_READY	(CPU_PM_DEPD_INIT | CPU_PM_DEPD_READY)
#define CPU_PM_LP_READY		(CPU_PM_INIT_READY | CPU_PM_PLAT_READY)
#else
#define CPU_PM_LP_READY		(CPU_PM_PLAT_READY)
#endif

#if CONFIG_MTK_PM_SUPPORT

#if CONFIG_MTK_CPU_SUSPEND_EN || CONFIG_MTK_SMP_EN
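/* Common resume flow: clear this CPU's core-on hint in the CPC. */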
static void cpupm_cpu_resume_common(const struct mtk_cpupm_pwrstate *state)
{
	CPU_PM_ASSERT(state != NULL);
	mtk_cpc_core_on_hint_clr(state->info.cpuid);
}
#endif

#if CONFIG_MTK_SMP_EN
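/*
 * Set the warm-boot entry address for the target core, run the per-core
 * architecture init, then request the core power-on.
 */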
static int cpupm_cpu_pwr_on_prepare(unsigned int cpu, uintptr_t entry)
{
	struct cpu_pwr_ctrl pwr_ctrl;

	PER_CPU_PWR_CTRL(pwr_ctrl, cpu);
	mt_smp_core_bootup_address_set(&pwr_ctrl, entry);
	mt_smp_core_init_arch(0, cpu, 1, &pwr_ctrl);

	return mt_smp_power_core_on(cpu, &pwr_ctrl);
}

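/* Re-enable GIC wakeup for the resuming core before the common resume flow. */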
static void cpupm_cpu_resume_smp(const struct mtk_cpupm_pwrstate *state)
{
	CPU_PM_ASSERT(state != NULL);

	plat_cpu_pm_lock();
	mmio_clrbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG,
			GIC_WAKEUP_IGNORE(state->info.cpuid));
	plat_cpu_pm_unlock();
	cpupm_cpu_resume_common(state);
}

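/* Power the core off and ignore its GIC wakeup sources while it is down. */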
static void cpupm_cpu_suspend_smp(const struct mtk_cpupm_pwrstate *state)
{
	struct cpu_pwr_ctrl pwr_ctrl;

	CPU_PM_ASSERT(state != NULL);

	PER_CPU_PWR_CTRL(pwr_ctrl, state->info.cpuid);
	mt_smp_power_core_off(&pwr_ctrl);
	mmio_setbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG,
			GIC_WAKEUP_IGNORE(state->info.cpuid));
}

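/*
 * SMP init for the boot CPU: make sure the CPC "reset power on" control
 * is cleared, then take the boot core through the normal power-on path.
 */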
static void cpupm_smp_init(unsigned int cpu, uintptr_t sec_entrypoint)
{
	unsigned int reg;
	struct mtk_cpupm_pwrstate state = {
		.info = {
			.cpuid = cpu,
			.mode = MTK_CPU_PM_SMP,
		},
		.pwr = {
			.afflv = 0,
			.state_id = 0,
		},
	};

	reg = mmio_read_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG);
	if ((reg & CPC_MCUSYS_CPC_RESET_PWR_ON_EN) != 0) {
		INFO("[%s:%d][CPU_PM] reset pwr on is enabled, clearing it\n",
		     __func__, __LINE__);
		mmio_clrbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG,
				CPC_MCUSYS_CPC_RESET_PWR_ON_EN);
	}

	cpupm_cpu_pwr_on_prepare(cpu, sec_entrypoint);
	cpupm_cpu_resume_smp(&state);
}

static struct mtk_cpu_smp_ops cpcv3_2_cpu_smp = {
	.init = cpupm_smp_init,
	.cpu_pwr_on_prepare = cpupm_cpu_pwr_on_prepare,
	.cpu_on = cpupm_cpu_resume_smp,
	.cpu_off = cpupm_cpu_suspend_smp,
};

#endif /* CONFIG_MTK_SMP_EN */

#if CONFIG_MTK_CPU_SUSPEND_EN
#define CPUPM_READY_MS		(40000)
#define CPUPM_ARCH_TIME_MS(ms)	((ms) * 1000 * SYS_COUNTER_FREQ_IN_MHZ)
#define CPUPM_BOOTUP_TIME_THR	CPUPM_ARCH_TIME_MS(CPUPM_READY_MS)

static int mt_pwr_nodes[MT_PWR_MAX];
static int plat_mt_lp_cpu_rc;
static unsigned int cpu_pm_status;
static unsigned int plat_prev_stateid;

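/*
 * Prepare MCUSYS power-down: if the incoming state id is not a platform
 * suspend id, promote it to the deepest system state that still has
 * pending votes, then hand it to the low-power resource manager.
 */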
static int mcusys_prepare_suspend(const struct mtk_cpupm_pwrstate *state)
{
	unsigned int stateid = state->pwr.state_id;

	if (mtk_cpc_mcusys_off_prepare() != CPC_SUCCESS) {
		goto mt_pwr_mcusysoff_break;
	}

	if (!IS_PLAT_SUSPEND_ID(stateid)) {
		if (mt_pwr_nodes[MT_PWR_SYSTEM_MEM] != 0) {
			stateid = MT_PLAT_PWR_STATE_SYSTEM_MEM;
		} else if (mt_pwr_nodes[MT_PWR_SYSTEM_PLL] != 0) {
			stateid = MT_PLAT_PWR_STATE_SYSTEM_PLL;
		} else if (mt_pwr_nodes[MT_PWR_SYSTEM_BUS] != 0) {
			stateid = MT_PLAT_PWR_STATE_SYSTEM_BUS;
		} else if (mt_pwr_nodes[MT_PWR_SUSPEND] != 0) {
			stateid = MT_PLAT_PWR_STATE_SUSPEND;
		} else {
			stateid = MT_PLAT_PWR_STATE_MCUSYS;
		}
	}

	plat_prev_stateid = stateid;
	plat_mt_lp_cpu_rc = mt_lp_rm_find_and_run_constraint(0, state->info.cpuid,
							     stateid, NULL);

	if (plat_mt_lp_cpu_rc < 0) {
		goto mt_pwr_mcusysoff_reflect;
	}

#ifdef CPU_PM_TINYSYS_SUPPORT
	mtk_set_cpu_pm_preffered_cpu(state->info.cpuid);
#endif
	return MTK_CPUPM_E_OK;

mt_pwr_mcusysoff_reflect:
	mtk_cpc_mcusys_off_reflect();
mt_pwr_mcusysoff_break:
	plat_mt_lp_cpu_rc = -1;

	return MTK_CPUPM_E_FAIL;
}

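/*
 * Undo the MCUSYS-off preparation: release the constraint taken at
 * suspend time and reflect the CPC mcusys-off flow.
 */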
static int mcusys_prepare_resume(const struct mtk_cpupm_pwrstate *state)
{
	if (plat_mt_lp_cpu_rc < 0) {
		return MTK_CPUPM_E_FAIL;
	}

	mt_lp_rm_reset_constraint(plat_mt_lp_cpu_rc, state->info.cpuid,
				  plat_prev_stateid);
	mtk_cpc_mcusys_off_reflect();
	return MTK_CPUPM_E_OK;
}

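/*
 * Power-off bookkeeping: vote on the node matching the requested state
 * id, and once no non-MCUSYS votes remain at an MCUSYS-off affinity
 * level, try to power down the whole MCUSYS.
 */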
static unsigned int cpupm_do_pstate_off(const mtk_pstate_type psci_state,
					const struct mtk_cpupm_pwrstate *state)
{
	unsigned int pstate = MT_CPUPM_PWR_DOMAIN_CORE;

	if (!state || (state->pwr.afflv > PLAT_MAX_PWR_LVL)) {
		CPU_PM_ASSERT(0);
	}

	switch (state->pwr.state_id) {
	case MT_PLAT_PWR_STATE_SYSTEM_MEM:
		mt_pwr_nodes[MT_PWR_SYSTEM_MEM] += 1;
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_PLL:
		mt_pwr_nodes[MT_PWR_SYSTEM_PLL] += 1;
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_BUS:
		mt_pwr_nodes[MT_PWR_SYSTEM_BUS] += 1;
		break;
	case MT_PLAT_PWR_STATE_SUSPEND:
		mt_pwr_nodes[MT_PWR_SUSPEND] += 1;
		break;
	default:
		if (!IS_MT_PLAT_PWR_STATE_MCUSYS(state->pwr.state_id) &&
		    !IS_PLAT_SYSTEM_SUSPEND(state->pwr.afflv)) {
			plat_cpu_pm_lock();
			mt_pwr_nodes[MT_PWR_NONMCUSYS] += 1;
			flush_dcache_range((uintptr_t)&mt_pwr_nodes[MT_PWR_NONMCUSYS],
					   sizeof(mt_pwr_nodes[MT_PWR_NONMCUSYS]));
			plat_cpu_pm_unlock();
		}
		break;
	}

	if ((mt_pwr_nodes[MT_PWR_NONMCUSYS] == 0) &&
	    IS_PLAT_MCUSYSOFF_AFFLV(state->pwr.afflv)) {
		/* Prepare to power down MCUSYS */
		if (mcusys_prepare_suspend(state) == MTK_CPUPM_E_OK) {
			mt_pwr_nodes[MT_PWR_MCUSYS_PDN] += 1;
			flush_dcache_range((uintptr_t)&mt_pwr_nodes[MT_PWR_MCUSYS_PDN],
					   sizeof(mt_pwr_nodes[MT_PWR_MCUSYS_PDN]));
			pstate |= (MT_CPUPM_PWR_DOMAIN_MCUSYS |
				   MT_CPUPM_PWR_DOMAIN_CLUSTER);
		}
	}

	if (state->pwr.afflv >= PLAT_MT_CPU_SUSPEND_CLUSTER) {
		pstate |= MT_CPUPM_PWR_DOMAIN_CLUSTER;
	}

	if (psci_get_pstate_pwrlvl(psci_state) >= PLAT_MT_CPU_SUSPEND_CLUSTER) {
		pstate |= MT_CPUPM_PWR_DOMAIN_PERCORE_DSU;
	}

	return pstate;
}

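/*
 * Power-on bookkeeping: the mirror of cpupm_do_pstate_off(). Resume
 * MCUSYS if it was powered down, drop the vote taken on the way in and
 * resync the CPC time after a system-level wakeup.
 */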
static unsigned int cpupm_do_pstate_on(const mtk_pstate_type psci_state,
				       const struct mtk_cpupm_pwrstate *state)
{
	unsigned int pstate = MT_CPUPM_PWR_DOMAIN_CORE;

	CPU_PM_ASSERT(state != NULL);

	if (state->pwr.afflv > PLAT_MAX_PWR_LVL) {
		CPU_PM_ASSERT(0);
	}

	if (mt_pwr_nodes[MT_PWR_MCUSYS_PDN] != 0) {
		mt_pwr_nodes[MT_PWR_MCUSYS_PDN] = 0;
		flush_dcache_range((uintptr_t)&mt_pwr_nodes[MT_PWR_MCUSYS_PDN],
				   sizeof(mt_pwr_nodes[MT_PWR_MCUSYS_PDN]));
		pstate |= (MT_CPUPM_PWR_DOMAIN_MCUSYS |
			   MT_CPUPM_PWR_DOMAIN_CLUSTER);
		mcusys_prepare_resume(state);
	}

	if (state->pwr.afflv >= PLAT_MT_CPU_SUSPEND_CLUSTER) {
		pstate |= MT_CPUPM_PWR_DOMAIN_CLUSTER;
	}

	switch (state->pwr.state_id) {
	case MT_PLAT_PWR_STATE_SYSTEM_MEM:
		mt_pwr_nodes[MT_PWR_SYSTEM_MEM] -= 1;
		CPU_PM_ASSERT(mt_pwr_nodes[MT_PWR_SYSTEM_MEM] >= 0);
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_PLL:
		mt_pwr_nodes[MT_PWR_SYSTEM_PLL] -= 1;
		CPU_PM_ASSERT(mt_pwr_nodes[MT_PWR_SYSTEM_PLL] >= 0);
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_BUS:
		mt_pwr_nodes[MT_PWR_SYSTEM_BUS] -= 1;
		CPU_PM_ASSERT(mt_pwr_nodes[MT_PWR_SYSTEM_BUS] >= 0);
		break;
	case MT_PLAT_PWR_STATE_SUSPEND:
		mt_pwr_nodes[MT_PWR_SUSPEND] -= 1;
		CPU_PM_ASSERT(mt_pwr_nodes[MT_PWR_SUSPEND] >= 0);
		break;
	default:
		if (!IS_MT_PLAT_PWR_STATE_MCUSYS(state->pwr.state_id) &&
		    !IS_PLAT_SYSTEM_SUSPEND(state->pwr.afflv)) {
			plat_cpu_pm_lock();
			mt_pwr_nodes[MT_PWR_NONMCUSYS] -= 1;
			flush_dcache_range((uintptr_t)&mt_pwr_nodes[MT_PWR_NONMCUSYS],
					   sizeof(mt_pwr_nodes[MT_PWR_NONMCUSYS]));
			plat_cpu_pm_unlock();
		}
		break;
	}

	if (IS_PLAT_SYSTEM_SUSPEND(state->pwr.afflv) ||
	    (IS_PLAT_SYSTEM_RETENTION(state->pwr.afflv) &&
	     (mt_pwr_nodes[MT_PWR_SUSPEND] > 0))) {
		mtk_cpc_time_sync();
	}

	if (mt_pwr_nodes[MT_PWR_NONMCUSYS] < 0) {
		CPU_PM_ASSERT(0);
	}

	pstate |= MT_CPUPM_PWR_DOMAIN_PERCORE_DSU;

	return pstate;
}

static void cpupm_cpu_resume(const struct mtk_cpupm_pwrstate *state)
{
	cpupm_cpu_resume_common(state);
}

static void cpupm_mcusys_resume(const struct mtk_cpupm_pwrstate *state)
{
	assert(state != NULL);
}

static void cpupm_mcusys_suspend(const struct mtk_cpupm_pwrstate *state)
{
	assert(state != NULL);
}

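/*
 * Translate a PSCI request into the MTK composite power state: SMP
 * requests only touch the core domain, while suspend requests go
 * through the pstate off/on bookkeeping above.
 */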
static unsigned int cpupm_get_pstate(enum mt_cpupm_pwr_domain domain,
				     const mtk_pstate_type psci_state,
				     const struct mtk_cpupm_pwrstate *state)
{
	unsigned int pstate = 0;

	if (state == NULL) {
		return 0;
	}

	if (state->info.mode == MTK_CPU_PM_SMP) {
		pstate = MT_CPUPM_PWR_DOMAIN_CORE;
	} else {
		if (domain == CPUPM_PWR_OFF) {
			pstate = cpupm_do_pstate_off(psci_state, state);
		} else if (domain == CPUPM_PWR_ON) {
			pstate = cpupm_do_pstate_on(psci_state, state);
		} else {
			INFO("[%s:%d][CPU_PM] unknown pwr domain: %d\n",
			     __func__, __LINE__, domain);
			assert(0);
		}
	}
	return pstate;
}

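/*
 * With CPU_PM_TINYSYS_SUPPORT, low-power entry also depends on the
 * mailbox handshake: wait until the tinysys side reports device init
 * and task ready, latching each completed step into cpu_pm_status.
 */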
static int cpupm_init(void)
{
	int ret = MTK_CPUPM_E_OK;

#ifdef CPU_PM_TINYSYS_SUPPORT
	int status;

	if ((cpu_pm_status & CPU_PM_INIT_READY) == CPU_PM_INIT_READY) {
		return MTK_CPUPM_E_OK;
	}

	if (!(cpu_pm_status & CPU_PM_DEPD_INIT)) {
		status = mtk_lp_depd_condition(CPUPM_MBOX_WAIT_DEV_INIT);
		if (status == 0) {
			plat_cpu_pm_lock();
			cpu_pm_status |= CPU_PM_DEPD_INIT;
			plat_cpu_pm_unlock();
		}
	}

	if ((cpu_pm_status & CPU_PM_DEPD_INIT) &&
	    !(cpu_pm_status & CPU_PM_DEPD_READY)) {
		status = mtk_lp_depd_condition(CPUPM_MBOX_WAIT_TASK_READY);
		if (status == 0) {
			plat_cpu_pm_lock();
			cpu_pm_status |= CPU_PM_DEPD_READY;
			plat_cpu_pm_unlock();
		}
	}

	ret = ((cpu_pm_status & CPU_PM_INIT_READY) == CPU_PM_INIT_READY) ?
	      MTK_CPUPM_E_OK : MTK_CPUPM_E_FAIL;
#endif
	return ret;
}

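/*
 * A low-power state is valid only once every CPU_PM_LP_READY condition
 * holds. Non-suspend states are additionally rejected until the system
 * has been up for CPUPM_READY_MS; system suspend is always allowed.
 */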
static int cpupm_pwr_state_valid(unsigned int afflv, unsigned int state)
{
	if (cpu_pm_status == CPU_PM_LP_READY) {
		return MTK_CPUPM_E_OK;
	}

	if (cpupm_init() != MTK_CPUPM_E_OK) {
		return MTK_CPUPM_E_FAIL;
	}

	if (read_cntpct_el0() >= (uint64_t)CPUPM_BOOTUP_TIME_THR) {
		plat_cpu_pm_lock();
		cpu_pm_status |= CPU_PM_PLAT_READY;
		plat_cpu_pm_unlock();
	}

	if (!IS_PLAT_SYSTEM_SUSPEND(afflv) &&
	    ((cpu_pm_status & CPU_PM_PLAT_READY) == 0)) {
		return MTK_CPUPM_E_FAIL;
	}

	return MTK_CPUPM_E_OK;
}

static struct mtk_cpu_pm_ops cpcv3_2_mcdi = {
	.get_pstate = cpupm_get_pstate,
	.pwr_state_valid = cpupm_pwr_state_valid,
	.cpu_resume = cpupm_cpu_resume,
	.mcusys_suspend = cpupm_mcusys_suspend,
	.mcusys_resume = cpupm_mcusys_resume,
};
#endif /* CONFIG_MTK_CPU_SUSPEND_EN */

#endif /* CONFIG_MTK_PM_SUPPORT */

/*
 * According to the MTK PM methodology, the PSCI op init must be
 * invoked after the CPU PM init, otherwise the initialization fails.
 */
int mt_plat_cpu_pm_init(void)
{
	plat_cpu_pm_lock_init();

	mtk_cpc_init();
#if CONFIG_MTK_PM_SUPPORT

#if CONFIG_MTK_CPU_SUSPEND_EN
	register_cpu_pm_ops(CPU_PM_FN, &cpcv3_2_mcdi);
#endif /* CONFIG_MTK_CPU_SUSPEND_EN */

#if CONFIG_MTK_SMP_EN
	register_cpu_smp_ops(CPU_PM_FN, &cpcv3_2_cpu_smp);
#endif /* CONFIG_MTK_SMP_EN */

#endif /* CONFIG_MTK_PM_SUPPORT */

	INFO("[%s:%d] - CPU PM INIT finished\n", __func__, __LINE__);
	return 0;
}
MTK_ARCH_INIT(mt_plat_cpu_pm_init);

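/* Device mapping for the CPU EB TCM, used only on tinysys-enabled builds. */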
static const mmap_region_t cpu_pm_mmap[] MTK_MMAP_SECTION = {
#ifdef CPU_PM_TINYSYS_SUPPORT
#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
	MAP_REGION_FLAT(CPU_EB_TCM_BASE, CPU_EB_TCM_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
#endif
#endif
	{0}
};
DECLARE_MTK_MMAP_REGIONS(cpu_pm_mmap);