/*
 * Copyright (c) 2022, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <common/debug.h>
#include <drivers/arm/gicv3.h>
#include <lib/psci/psci.h>
#include <lib/utils.h>
#ifdef MTK_PUBEVENT_ENABLE
#include <mtk_event/mtk_pubsub_events.h>
#endif
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

#include <lib/mtk_init/mtk_init.h>
#include <lib/pm/mtk_pm.h>
#include <mt_gic_v3.h>
#include <platform_def.h>

#define IS_AFFLV_PUBEVENT(_pstate) \
	((_pstate & (MT_CPUPM_PWR_DOMAIN_MCUSYS | MT_CPUPM_PWR_DOMAIN_CLUSTER)) != 0)

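/*
 * When MTK_PUBEVENT_ENABLE is defined, the macros below publish CPU/cluster/mcusys
 * power transition events through the MTK pub-sub framework; otherwise they expand
 * to no-ops so that call sites need no conditional compilation.
 */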
#ifdef MTK_PUBEVENT_ENABLE
#define MT_CPUPM_EVENT_PWR_ON(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_pwr_on, (const void *)(x)); })

#define MT_CPUPM_EVENT_PWR_OFF(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_pwr_off, (const void *)(x)); })

#define MT_CPUPM_EVENT_AFFLV_PWR_ON(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_afflv_pwr_on, (const void *)(x)); })

#define MT_CPUPM_EVENT_AFFLV_PWR_OFF(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_afflv_pwr_off, (const void *)(x)); })

#else
#define MT_CPUPM_EVENT_PWR_ON(x) ({ (void)x; })
#define MT_CPUPM_EVENT_PWR_OFF(x) ({ (void)x; })
#define MT_CPUPM_EVENT_AFFLV_PWR_ON(x) ({ (void)x; })
#define MT_CPUPM_EVENT_AFFLV_PWR_OFF(x) ({ (void)x; })
#endif

/*
 * The CPU's requirement for the cluster power status:
 * [0] : the CPU requires the cluster to power down
 * [1] : the CPU requires the cluster to power on
 */
#define coordinate_cluster(onoff)	write_clusterpwrdn_el1(onoff)
#define coordinate_cluster_pwron()	coordinate_cluster(1)
#define coordinate_cluster_pwroff()	coordinate_cluster(0)

/* Disable all functions by default */
#define MTK_CPUPM_FN_MASK_DEFAULT	(0)

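/*
 * Registered platform callback tables: 'ops' provides the CPU-idle/suspend hooks,
 * 'smp' provides the hotplug hooks, and 'fn_mask' records which MTK_CPUPM_FN_*
 * callbacks were successfully registered and may therefore be invoked.
 */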
struct mtk_cpu_pwr_ctrl {
	unsigned int fn_mask;
	struct mtk_cpu_pm_ops *ops;
	struct mtk_cpu_smp_ops *smp;
};

static struct mtk_cpu_pwr_ctrl mtk_cpu_pwr = {
	.fn_mask = MTK_CPUPM_FN_MASK_DEFAULT,
	.ops = NULL,
};

#define IS_CPUIDLE_FN_ENABLE(x)	((mtk_cpu_pwr.ops != NULL) && ((mtk_cpu_pwr.fn_mask & x) != 0))
#define IS_CPUSMP_FN_ENABLE(x)	((mtk_cpu_pwr.smp != NULL) && ((mtk_cpu_pwr.fn_mask & x) != 0))

/* per-cpu power state */
static unsigned int armv8_2_power_state[PLATFORM_CORE_COUNT];

#define armv8_2_get_pwr_stateid(cpu) psci_get_pstate_id(armv8_2_power_state[cpu])

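/*
 * Translate the cached PSCI power state into the MediaTek platform pstate bitmask
 * (MT_CPUPM_PWR_DOMAIN_*) for the given power direction through the registered
 * get_pstate() callback. Returns 0 when the callback is not registered.
 */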
static unsigned int get_mediatek_pstate(unsigned int domain, unsigned int psci_state,
					struct mtk_cpupm_pwrstate *state)
{
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_CPUPM_GET_PWR_STATE)) {
		return mtk_cpu_pwr.ops->get_pstate(domain, psci_state, state);
	}

	return 0;
}

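/*
 * Return the highest affinity level that is requested to leave the RUN state,
 * or PSCI_INVALID_PWR_LVL when every level in state_info stays running.
 */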
unsigned int armv8_2_get_pwr_afflv(const psci_power_state_t *state_info)
{
	int i;

	for (i = (int)PLAT_MAX_PWR_LVL; i >= (int)PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_run(state_info->pwr_domain_state[i]) == 0) {
			return (unsigned int) i;
		}
	}

	return PSCI_INVALID_PWR_LVL;
}

/* MediaTek mcusys power on control interface */
static void armv8_2_mcusys_pwr_on_common(const struct mtk_cpupm_pwrstate *state)
{
	mt_gic_init();
	mt_gic_distif_restore();
	gic_sgi_restore_all();

	/* Add code here that needs to run before the system finishes mcusys power-on */
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_MCUSYS)) {
		mtk_cpu_pwr.ops->mcusys_resume(state);
	}
}

/* MediaTek mcusys power down control interface */
static void armv8_2_mcusys_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state)
{
	mt_gic_distif_save();
	gic_sgi_save_all();

	/* Add code here that needs to run before the system enters mcusys power-off */
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_MCUSYS)) {
		mtk_cpu_pwr.ops->mcusys_suspend(state);
	}
}

/* MediaTek Cluster power on control interface */
static void armv8_2_cluster_pwr_on_common(const struct mtk_cpupm_pwrstate *state)
{
	/* Add code here that needs to run before the system finishes cluster power-on */
#if defined(MTK_CM_MGR) && !defined(MTK_FPGA_EARLY_PORTING)
	/* init cpu stall counter */
	init_cpu_stall_counter_all();
#endif

	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_CLUSTER)) {
		mtk_cpu_pwr.ops->cluster_resume(state);
	}
}

/* MediaTek Cluster power down control interface */
static void armv8_2_cluster_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state)
{
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_CLUSTER)) {
		mtk_cpu_pwr.ops->cluster_suspend(state);
	}
}

/* MediaTek CPU power on control interface */
static void armv8_2_cpu_pwr_on_common(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	coordinate_cluster_pwron();

	gicv3_rdistif_on(plat_my_core_pos());
	gicv3_cpuif_enable(plat_my_core_pos());
	mt_gic_rdistif_init();

	/* If MCUSYS has been powered down then restore GIC redistributor for all CPUs. */
	if (IS_PLAT_SYSTEM_RETENTION(state->pwr.afflv)) {
		mt_gic_rdistif_restore_all();
	} else {
		mt_gic_rdistif_restore();
	}
}

/* MediaTek CPU power down control interface */
static void armv8_2_cpu_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	if ((pstate & MT_CPUPM_PWR_DOMAIN_PERCORE_DSU) != 0) {
		coordinate_cluster_pwroff();
	}

	mt_gic_rdistif_save();
	gicv3_cpuif_disable(plat_my_core_pos());
	gicv3_rdistif_off(plat_my_core_pos());
}

static void armv8_2_cpu_pwr_resume(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	armv8_2_cpu_pwr_on_common(state, pstate);
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_CORE)) {
		mtk_cpu_pwr.ops->cpu_resume(state);
	}
}

static void armv8_2_cpu_pwr_suspend(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_CORE)) {
		mtk_cpu_pwr.ops->cpu_suspend(state);
	}
	armv8_2_cpu_pwr_dwn_common(state, pstate);
}

static void armv8_2_cpu_pwr_on(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	armv8_2_cpu_pwr_on_common(state, pstate);

	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_CORE_ON)) {
		mtk_cpu_pwr.smp->cpu_on(state);
	}
}

static void armv8_2_cpu_pwr_off(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_CORE_OFF)) {
		mtk_cpu_pwr.smp->cpu_off(state);
	}
	armv8_2_cpu_pwr_dwn_common(state, pstate);
}

/* MediaTek PSCI power domain */
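/*
 * CPU_ON entry point: let the platform prepare the warm-boot entry for the
 * target core through cpu_pwr_on_prepare() before PSCI powers it on.
 */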
static int armv8_2_power_domain_on(u_register_t mpidr)
{
	int ret = PSCI_E_SUCCESS;
	int cpu = plat_core_pos_by_mpidr(mpidr);
	uintptr_t entry = plat_pm_get_warm_entry();

	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_PWR_ON_CORE_PREPARE)) {
		if (mtk_cpu_pwr.smp->cpu_pwr_on_prepare(cpu, entry) != 0) {
			ret = PSCI_E_DENIED;
		}
	}
	INFO("CPU %u power domain prepare on\n", cpu);
	return ret;
}

/* MediaTek PSCI power domain */
static void armv8_2_power_domain_on_finish(const psci_power_state_t *state)
{
	struct mt_cpupm_event_data nb;
	unsigned int pstate = (MT_CPUPM_PWR_DOMAIN_CORE | MT_CPUPM_PWR_DOMAIN_PERCORE_DSU);
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_SMP,
		},
		.pwr = {
			.afflv = armv8_2_get_pwr_afflv(state),
			.state_id = 0x0,
		},
	};

	armv8_2_cpu_pwr_on(&pm_state, pstate);

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_ON(&nb);

	INFO("CPU %u power domain on finished\n", pm_state.info.cpuid);
}

/* MediaTek PSCI power domain */
static void armv8_2_power_domain_off(const psci_power_state_t *state)
{
	struct mt_cpupm_event_data nb;
	unsigned int pstate = (MT_CPUPM_PWR_DOMAIN_CORE | MT_CPUPM_PWR_DOMAIN_PERCORE_DSU);
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_SMP,
		},
		.pwr = {
			.afflv = armv8_2_get_pwr_afflv(state),
			.state_id = 0x0,
		},
	};
	armv8_2_cpu_pwr_off(&pm_state, pstate);

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_OFF(&nb);

	INFO("CPU %u power domain off\n", pm_state.info.cpuid);
}

/* MediaTek PSCI power domain */
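/*
 * CPU_SUSPEND power-down path: power domains are quiesced from the innermost
 * level outwards (core, then cluster, then mcusys) according to the platform
 * pstate, and the corresponding power-off events are published afterwards.
 */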
static void armv8_2_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned int pstate = 0;
	struct mt_cpupm_event_data nb;
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_CPUIDLE,
		},
	};

	pm_state.pwr.state_id = armv8_2_get_pwr_stateid(pm_state.info.cpuid);
	pm_state.pwr.afflv = armv8_2_get_pwr_afflv(state);
	pm_state.pwr.raw = state;

	pstate = get_mediatek_pstate(CPUPM_PWR_OFF,
				     armv8_2_power_state[pm_state.info.cpuid], &pm_state);

	armv8_2_cpu_pwr_suspend(&pm_state, pstate);

	if ((pstate & MT_CPUPM_PWR_DOMAIN_CLUSTER) != 0) {
		armv8_2_cluster_pwr_dwn_common(&pm_state);
	}

	if ((pstate & MT_CPUPM_PWR_DOMAIN_MCUSYS) != 0) {
		armv8_2_mcusys_pwr_dwn_common(&pm_state);
	}

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_OFF(&nb);

	if (IS_AFFLV_PUBEVENT(pstate)) {
		MT_CPUPM_EVENT_AFFLV_PWR_OFF(&nb);
	}
}

/* MediaTek PSCI power domain */
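/*
 * CPU_SUSPEND wake-up path: power domains are restored in the reverse order of
 * suspend (mcusys, then cluster, then core), after which the power-on events
 * are published.
 */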
static void armv8_2_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned int pstate = 0;
	struct mt_cpupm_event_data nb;
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_CPUIDLE,
		},
	};

	pm_state.pwr.state_id = armv8_2_get_pwr_stateid(pm_state.info.cpuid);
	pm_state.pwr.afflv = armv8_2_get_pwr_afflv(state);
	pm_state.pwr.raw = state;

	pstate = get_mediatek_pstate(CPUPM_PWR_ON,
				     armv8_2_power_state[pm_state.info.cpuid], &pm_state);

	if ((pstate & MT_CPUPM_PWR_DOMAIN_MCUSYS) != 0) {
		armv8_2_mcusys_pwr_on_common(&pm_state);
	}

	if ((pstate & MT_CPUPM_PWR_DOMAIN_CLUSTER) != 0) {
		armv8_2_cluster_pwr_on_common(&pm_state);
	}

	armv8_2_cpu_pwr_resume(&pm_state, pstate);

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_ON(&nb);

	if (IS_AFFLV_PUBEVENT(pstate)) {
		MT_CPUPM_EVENT_AFFLV_PWR_ON(&nb);
	}
}

/* MediaTek PSCI power domain */
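/*
 * Validate the requested PSCI power state with the platform pwr_state_valid()
 * callback, build the composite req_state, and cache the raw power_state per
 * core so the suspend/resume paths can translate it later.
 */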
static int armv8_2_validate_power_state(unsigned int power_state, psci_power_state_t *req_state)
{
	unsigned int i;
	unsigned int pstate = psci_get_pstate_type(power_state);
	unsigned int aff_lvl = psci_get_pstate_pwrlvl(power_state);
	unsigned int my_core_pos = plat_my_core_pos();

	if (mtk_cpu_pwr.ops == NULL) {
		return PSCI_E_INVALID_PARAMS;
	}

	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_PWR_STATE_VALID)) {
		if (mtk_cpu_pwr.ops->pwr_state_valid(aff_lvl, pstate) != 0) {
			return PSCI_E_INVALID_PARAMS;
		}
	}

	if (pstate == PSTATE_TYPE_STANDBY) {
		req_state->pwr_domain_state[0] = PLAT_MAX_RET_STATE;
	} else {
		for (i = PSCI_CPU_PWR_LVL; i <= aff_lvl; i++) {
			req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
		}
	}
	armv8_2_power_state[my_core_pos] = power_state;

	return PSCI_E_SUCCESS;
}

/* MediaTek PSCI power domain */
#if CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND
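/*
 * Build the composite power state used for SYSTEM_SUSPEND. If the platform
 * reports that full power-down is not currently valid, fall back to CPU-level
 * retention so the suspend request is effectively refused.
 */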
static void armv8_2_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	unsigned int i;
	int ret;
	unsigned int power_state;
	unsigned int my_core_pos = plat_my_core_pos();

	ret = mtk_cpu_pwr.ops->pwr_state_valid(PLAT_MAX_PWR_LVL,
					       PSTATE_TYPE_POWERDOWN);

	if (ret != MTK_CPUPM_E_OK) {
		/* Avoid system suspend because the platform is not ready. */
		req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] =
						PLAT_MAX_RET_STATE;
		for (i = PSCI_CPU_PWR_LVL + 1; i <= PLAT_MAX_PWR_LVL; i++) {
			req_state->pwr_domain_state[i] = PSCI_LOCAL_STATE_RUN;
		}

		power_state = psci_make_powerstate(0, PSTATE_TYPE_STANDBY, PSCI_CPU_PWR_LVL);
	} else {
		for (i = PSCI_CPU_PWR_LVL; i <= PLAT_MAX_PWR_LVL; i++) {
			req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
		}

		power_state = psci_make_powerstate(MT_PLAT_PWR_STATE_SYSTEM_SUSPEND,
						   PSTATE_TYPE_POWERDOWN, PLAT_MAX_PWR_LVL);
	}

	armv8_2_power_state[my_core_pos] = power_state;
	flush_dcache_range((uintptr_t)&armv8_2_power_state[my_core_pos],
			   sizeof(armv8_2_power_state[my_core_pos]));
}
#endif

static void armv8_2_pm_smp_init(unsigned int cpu_id, uintptr_t entry_point)
{
	if (entry_point == 0) {
		ERROR("%s, warm_entry_point is null\n", __func__);
		panic();
	}
	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_INIT)) {
		mtk_cpu_pwr.smp->init(cpu_id, entry_point);
	}
	INFO("[%s:%d] - Initialize finished\n", __func__, __LINE__);
}

static struct plat_pm_pwr_ctrl armv8_2_pwr_ops = {
	.pwr_domain_suspend = armv8_2_power_domain_suspend,
	.pwr_domain_suspend_finish = armv8_2_power_domain_suspend_finish,
	.validate_power_state = armv8_2_validate_power_state,
#if CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND
	.get_sys_suspend_power_state = armv8_2_get_sys_suspend_power_state,
#endif
};

struct plat_pm_smp_ctrl armv8_2_smp_ops = {
	.init = armv8_2_pm_smp_init,
	.pwr_domain_on = armv8_2_power_domain_on,
	.pwr_domain_off = armv8_2_power_domain_off,
	.pwr_domain_on_finish = armv8_2_power_domain_on_finish,
};

#define ISSUE_CPU_PM_REG_FAIL(_success) ({ _success = false; assert(0); })

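/*
 * For every MTK_CPUPM_FN_* bit requested in _fns, check that the matching
 * callback in _ops is populated: accumulate the verified bits into _flag, or
 * mark the registration as failed via _result when a callback is missing.
 */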
#define CPM_PM_FN_CHECK(_fns, _ops, _id, _func, _result, _flag) ({ \
	if ((_fns & _id)) { \
		if (_ops->_func) { \
			_flag |= _id; \
		} else { \
			ISSUE_CPU_PM_REG_FAIL(_result); \
		} \
	} })

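/*
 * Usage sketch (hypothetical driver names, for illustration only): a platform
 * low-power driver registers its callback table once during boot, e.g.
 *
 *	static struct mtk_cpu_pm_ops mt_pm_ops = {
 *		.cpu_resume = mt_cpu_resume,
 *		.cpu_suspend = mt_cpu_suspend,
 *		.pwr_state_valid = mt_pwr_state_valid,
 *	};
 *
 *	register_cpu_pm_ops(MTK_CPUPM_FN_RESUME_CORE | MTK_CPUPM_FN_SUSPEND_CORE |
 *			    MTK_CPUPM_FN_PWR_STATE_VALID, &mt_pm_ops);
 *
 * Only one ops table may be registered; a second call fails with MTK_CPUPM_E_ERR.
 */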
int register_cpu_pm_ops(unsigned int fn_flags, struct mtk_cpu_pm_ops *ops)
{
	bool success = true;
	unsigned int fns = 0;

	if ((ops == NULL) || (mtk_cpu_pwr.ops != NULL)) {
		ERROR("[%s:%d] register cpu_pm fail !!\n", __FILE__, __LINE__);
		return MTK_CPUPM_E_ERR;
	}

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_CORE,
			cpu_resume, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_CORE,
			cpu_suspend, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_CLUSTER,
			cluster_resume, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_CLUSTER,
			cluster_suspend, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_MCUSYS,
			mcusys_resume, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_MCUSYS,
			mcusys_suspend, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_CPUPM_GET_PWR_STATE,
			get_pstate, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_STATE_VALID,
			pwr_state_valid, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_INIT,
			init, success, fns);

	if (success) {
		mtk_cpu_pwr.ops = ops;
		mtk_cpu_pwr.fn_mask |= fns;
		plat_pm_ops_setup_pwr(&armv8_2_pwr_ops);
		INFO("[%s:%d] CPU pwr ops register success, support:0x%x\n",
		     __func__, __LINE__, fns);
	} else {
		ERROR("[%s:%d] register cpu_pm ops fail !, fn:0x%x\n",
		      __func__, __LINE__, fn_flags);
		assert(0);
	}
	return MTK_CPUPM_E_OK;
}

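/*
 * Register the SMP (hotplug) callback table. Mirrors register_cpu_pm_ops():
 * the verified MTK_CPUPM_FN_* bits are merged into fn_mask and the PSCI SMP
 * hooks are installed through plat_pm_ops_setup_smp().
 */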
int register_cpu_smp_ops(unsigned int fn_flags, struct mtk_cpu_smp_ops *ops)
{
	bool success = true;
	unsigned int fns = 0;

	if ((ops == NULL) || (mtk_cpu_pwr.smp != NULL)) {
		ERROR("[%s:%d] register cpu_smp fail !!\n", __FILE__, __LINE__);
		return MTK_CPUPM_E_ERR;
	}

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_INIT,
			init, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_ON_CORE_PREPARE,
			cpu_pwr_on_prepare, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_CORE_ON,
			cpu_on, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_CORE_OFF,
			cpu_off, success, fns);

	if (success == true) {
		mtk_cpu_pwr.smp = ops;
		mtk_cpu_pwr.fn_mask |= fns;
		plat_pm_ops_setup_smp(&armv8_2_smp_ops);
		INFO("[%s:%d] CPU smp ops register success, support:0x%x\n",
		     __func__, __LINE__, fns);
	} else {
		ERROR("[%s:%d] register cpu_smp ops fail !, fn:0x%x\n",
		      __func__, __LINE__, fn_flags);
		assert(0);
	}
	return MTK_CPUPM_E_OK;
}