blob: d727967e85d0736ca1b1ca8445f25b1e44b252c2 [file] [log] [blame]
developer65014b82015-04-13 14:47:57 +08001/*
2 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch_helpers.h>
developer65014b82015-04-13 14:47:57 +080032#include <assert.h>
33#include <bakery_lock.h>
34#include <cci.h>
35#include <console.h>
36#include <debug.h>
37#include <errno.h>
Koan-Sin Tan1d2b6392016-04-18 15:17:57 +080038#include <gicv2.h>
developer65014b82015-04-13 14:47:57 +080039#include <mcucfg.h>
40#include <mmio.h>
41#include <mt8173_def.h>
42#include <mt_cpuxgpt.h> /* generic_timer_backup() */
Koan-Sin Tan1d2b6392016-04-18 15:17:57 +080043#include <plat_arm.h>
developer65014b82015-04-13 14:47:57 +080044#include <plat_private.h>
45#include <power_tracer.h>
46#include <psci.h>
47#include <rtc.h>
48#include <scu.h>
49#include <spm_hotplug.h>
50#include <spm_mcdi.h>
51#include <spm_suspend.h>
52
#if !ENABLE_PLAT_COMPAT
/* Power levels exposed by this platform's PSCI topology. */
#define MTK_PWR_LVL0 0	/* CPU core power domain */
#define MTK_PWR_LVL1 1	/* Cluster power domain */
#define MTK_PWR_LVL2 2	/* System power domain */

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state) (state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state) (state)->pwr_domain_state[MTK_PWR_LVL1]
/*
 * The system level is only present when the topology goes above the
 * cluster; otherwise report 0 (i.e. the "run" state).
 */
#define MTK_SYSTEM_PWR_STATE(state) ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
			(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)
#endif
64
#if PSCI_EXTENDED_STATE_ID
/*
 * The table storing the valid idle power states. Ensure that the
 * array entries are populated in ascending order of state-id to
 * enable us to use binary search during power state validation.
 * The table must be terminated by a NULL entry.
 */
const unsigned int mtk_pm_idle_states[] = {
	/* State-id - 0x001: core retention, cluster/system running */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002: core powered down, cluster/system running */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022: core and cluster powered down */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
	/* State-id - 0x222: whole system powered down */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	/* Terminator */
	0,
};
#endif
90
/*
 * Per-core saved context. timer_data holds the generic timer registers
 * saved/restored by mt_save_generic_timer()/mt_restore_generic_timer()
 * (cntkctl_el1, cntp_cval/tval/ctl_el0, cntv_tval/ctl_el0 in 8-byte slots).
 */
struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

/* Saved context for every core within one cluster. */
struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Top level structure which encapsulates the context of the entire system
 */
static struct system_context dormant_data[1];
114
115static inline struct cluster_context *system_cluster(
116 struct system_context *system,
117 uint32_t clusterid)
118{
119 return &system->cluster[clusterid];
120}
121
122static inline struct core_context *cluster_core(struct cluster_context *cluster,
123 uint32_t cpuid)
124{
125 return &cluster->core[cpuid];
126}
127
128static struct cluster_context *get_cluster_data(unsigned long mpidr)
129{
130 uint32_t clusterid;
131
132 clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
133
134 return system_cluster(dormant_data, clusterid);
135}
136
137static struct core_context *get_core_data(unsigned long mpidr)
138{
139 struct cluster_context *cluster;
140 uint32_t cpuid;
141
142 cluster = get_cluster_data(mpidr);
143 cpuid = mpidr & MPIDR_CPU_MASK;
144
145 return cluster_core(cluster, cpuid);
146}
147
/*
 * Save this core's generic timer registers into 'container' (six 8-byte
 * slots): [0]=cntkctl_el1, [1]=cntp_cval_el0, [2]=cntp_tval_el0,
 * [3]=cntp_ctl_el0, [4]=cntv_tval_el0, [5]=cntv_ctl_el0.
 * The layout must stay in sync with mt_restore_generic_timer().
 */
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	/* Slots 0-1: EL1 timer control + physical compare value */
	__asm__ volatile("mrs %x0, cntkctl_el1\n\t"
			 "mrs %x1, cntp_cval_el0\n\t"
			 "stp %x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	/* Slots 2-3: physical timer value + control */
	__asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
			 "mrs %x1, cntp_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	/* Slots 4-5: virtual timer value + control */
	__asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
			 "mrs %x1, cntv_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}
174
/*
 * Restore this core's generic timer registers from 'container'.
 * Inverse of mt_save_generic_timer(); relies on the same slot layout.
 */
static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	/* Slots 0-1: EL1 timer control + physical compare value */
	__asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
			 "msr cntkctl_el1, %x0\n\t"
			 "msr cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	/* Slots 2-3: physical timer value + control */
	__asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
			 "msr cntp_tval_el0, %x0\n\t"
			 "msr cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	/* Slots 4-5: virtual timer value + control */
	__asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
			 "msr cntv_tval_el0, %x0\n\t"
			 "msr cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}
201
/* Read CNTP_CTL_EL0, the physical timer control register. */
static inline uint64_t read_cntpctl(void)
{
	uint64_t cntpctl;

	__asm__ volatile("mrs %x0, cntp_ctl_el0"
			 : "=r" (cntpctl) : : "memory");

	return cntpctl;
}
211
/* Write CNTP_CTL_EL0, the physical timer control register. */
static inline void write_cntpctl(uint64_t cntpctl)
{
	__asm__ volatile("msr cntp_ctl_el0, %x0" : : "r"(cntpctl));
}
216
/* Disable the per-core physical timer before powering this core down. */
static void stop_generic_timer(void)
{
	/*
	 * Disable the timer and mask the irq to prevent
	 * spurious interrupts on this cpu interface. It
	 * will bite us when we come back if we don't. It
	 * will be replayed on the inbound cluster.
	 */
	uint64_t cntpctl = read_cntpctl();

	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}
229
230static void mt_cpu_save(unsigned long mpidr)
231{
232 struct core_context *core;
233
234 core = get_core_data(mpidr);
235 mt_save_generic_timer(core->timer_data);
236
237 /* disable timer irq, and upper layer should enable it again. */
238 stop_generic_timer();
239}
240
241static void mt_cpu_restore(unsigned long mpidr)
242{
243 struct core_context *core;
244
245 core = get_core_data(mpidr);
246 mt_restore_generic_timer(core->timer_data);
247}
248
/* Save all per-cpu state lost across power down (currently timer only). */
static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}
254
/* Restore the per-cpu state saved by mt_platform_save_context(). */
static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}
260
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800261#if ENABLE_PLAT_COMPAT
/*******************************************************************************
* Private function which is used to determine if any platform actions
* should be performed for the specified affinity instance given its
* state. Nothing needs to be done if the 'state' is not off or if this is not
* the highest affinity level which will enter the 'state'.
*
* Returns 0 when platform actions should run at this level, -EAGAIN when
* they should be postponed (or skipped entirely).
*******************************************************************************/
static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
{
	unsigned int max_phys_off_afflvl;

	assert(afflvl <= MPIDR_AFFLVL2);

	if (state != PSCI_STATE_OFF)
		return -EAGAIN;

	/*
	 * Find the highest affinity level which will be suspended and postpone
	 * all the platform specific actions until that level is hit.
	 */
	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
	if (afflvl != max_phys_off_afflvl)
		return -EAGAIN;

	return 0;
}
288
/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to enter
 * standby. 'power_state' is the PSCI power_state argument from the caller;
 * only level-0 (core) standby requests are honoured.
 ******************************************************************************/
static void plat_affinst_standby(unsigned int power_state)
{
	unsigned int target_afflvl;

	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);

	/*
	 * It's possible to enter standby only on affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (target_afflvl == MPIDR_AFFLVL0) {
		/*
		 * Enter standby state. dsb is good practice before using wfi
		 * to enter low power states.
		 */
		dsb();
		wfi();
	}
}
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800313#else
314static void plat_cpu_standby(plat_local_state_t cpu_state)
315{
316 unsigned int scr;
317
318 scr = read_scr_el3();
319 write_scr_el3(scr | SCR_IRQ_BIT);
320 isb();
321 dsb();
322 wfi();
323 write_scr_el3(scr);
324}
325#endif
developer65014b82015-04-13 14:47:57 +0800326
327/*******************************************************************************
328 * MTK_platform handler called when an affinity instance is about to be turned
329 * on. The level and mpidr determine the affinity instance.
330 ******************************************************************************/
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800331#if ENABLE_PLAT_COMPAT
/*
 * Power on the cpu identified by 'mpidr': program its warm-boot entry
 * point into the mcucfg reset-vector register for the owning cluster,
 * then ask the SPM to release the core. Only affinity level 0 requests
 * are acted upon.
 */
static int plat_affinst_on(unsigned long mpidr,
			   unsigned long sec_entrypoint,
			   unsigned int afflvl,
			   unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	/*
	 * It's possible to turn on only affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (afflvl != MPIDR_AFFLVL0)
		return rc;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	/* Pick the reset-vector register of the target core's cluster */
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
	     cluster_id, cpu_id, mmio_read_32(rv));

	/* Let the SPM power the core up */
	spm_hotplug_on(mpidr);

	return rc;
}
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800365#else
/* Warm-boot entry point recorded by plat_setup_psci_ops(). */
static uintptr_t secure_entrypoint;

/*
 * Power on the cpu identified by 'mpidr': program the saved warm-boot
 * entry point into the mcucfg reset-vector register of the owning
 * cluster, then ask the SPM to release the core.
 */
static int plat_power_domain_on(unsigned long mpidr)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	/* Pick the reset-vector register of the target core's cluster */
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
	     cluster_id, cpu_id, mmio_read_32(rv));

	/* Let the SPM power the core up */
	spm_hotplug_on(mpidr);
	return rc;
}
390#endif
developer65014b82015-04-13 14:47:57 +0800391
392/*******************************************************************************
393 * MTK_platform handler called when an affinity instance is about to be turned
394 * off. The level and mpidr determine the affinity instance. The 'state' arg.
395 * allows the platform to decide whether the cluster is being turned off and
396 * take apt actions.
397 *
398 * CAUTION: This function is called with coherent stacks so that caches can be
399 * turned off, flushed and coherency disabled. There is no guarantee that caches
400 * will remain turned on across calls to this function as each affinity level is
401 * dealt with. So do not write & read global variables across calls. It will be
402 * wise to do flush a write to the global to prevent unpredictable results.
403 ******************************************************************************/
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800404#if ENABLE_PLAT_COMPAT
/*
 * Power down the calling core: mask its GIC cpu interface, hand the core
 * to the SPM for power-off, and drop CCI coherency when the whole
 * cluster is going down.
 */
static void plat_affinst_off(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (afflvl != MPIDR_AFFLVL0) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800427#else
/*
 * Power down the calling core: mask its GIC cpu interface, hand the core
 * to the SPM for power-off, and drop CCI coherency when the target state
 * powers the whole cluster off.
 */
static void plat_power_domain_off(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}
446#endif
developer65014b82015-04-13 14:47:57 +0800447
448/*******************************************************************************
449 * MTK_platform handler called when an affinity instance is about to be
450 * suspended. The level and mpidr determine the affinity instance. The 'state'
451 * arg. allows the platform to decide whether the cluster is being turned off
452 * and take apt actions.
453 *
454 * CAUTION: This function is called with coherent stacks so that caches can be
455 * turned off, flushed and coherency disabled. There is no guarantee that caches
456 * will remain turned on across calls to this function as each affinity level is
457 * dealt with. So do not write & read global variables across calls. It will be
458 * wise to do flush a write to the global to prevent unpredictable results.
459 ******************************************************************************/
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800460#if ENABLE_PLAT_COMPAT
/*
 * Suspend the calling core. Programs the warm-boot entry point, saves
 * the per-cpu timer context, and — depending on how deep the suspend
 * goes — disables CCI coherency (cluster level) or enters full system
 * suspend via the SPM (system level).
 */
static void plat_affinst_suspend(unsigned long sec_entrypoint,
				 unsigned int afflvl,
				 unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	/* Program the warm-boot entry for this core's cluster */
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);

	/* Below system level, let the MCDI driver prepare the off state */
	if (afflvl < MPIDR_AFFLVL2)
		spm_mcdi_prepare_for_off_state(mpidr, afflvl);

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	/* System-level suspend: SCU off, timer backed up, SPM suspend */
	if (afflvl >= MPIDR_AFFLVL2) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();
	}
}
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800504#else
/*
 * Suspend the calling core. Programs the warm-boot entry point, saves
 * the per-cpu timer context, and — depending on the composite target
 * state — prepares MCDI off states, disables CCI coherency (cluster
 * off) or enters full system suspend via the SPM (system off).
 */
static void plat_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	/* Program the warm-boot entry for this core's cluster */
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, secure_entrypoint);

	/* MCDI handles core/cluster off states when the system stays up */
	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
	}

	mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
	}

	/* System-level suspend: SCU off, timer backed up, SPM suspend */
	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		disable_scu(mpidr);
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();
	}
}
544#endif
developer65014b82015-04-13 14:47:57 +0800545
546/*******************************************************************************
547 * MTK_platform handler called when an affinity instance has just been powered
548 * on after being turned off earlier. The level and mpidr determine the affinity
549 * instance. The 'state' arg. allows the platform to decide whether the cluster
550 * was turned off prior to wakeup and do what's necessary to setup it up
551 * correctly.
552 ******************************************************************************/
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800553#if ENABLE_PLAT_COMPAT
/*
 * Finish powering on the calling core after a CPU_ON: re-enable CCI
 * coherency if the cluster was off, then bring up this core's GIC cpu
 * interface and per-cpu distributor state.
 */
static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	/* Enable the gic cpu interface */
	gicv2_cpuif_enable();
	gicv2_pcpu_distif_init();
	trace_power_flow(mpidr, CPU_UP);
}
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800574#else
void mtk_system_pwr_domain_resume(void);

/*
 * Finish powering on the calling core. On a system-level wakeup the
 * whole GIC is re-initialized by mtk_system_pwr_domain_resume(), so the
 * per-cpu GIC setup at the end is skipped in that case.
 */
static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);

	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		mtk_system_pwr_domain_resume();

	if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	/*
	 * System resumed: mtk_system_pwr_domain_resume() already ran
	 * plat_arm_gic_init(), so skip the per-cpu GIC setup below.
	 */
	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
		return;

	/* Enable the gic cpu interface */
	gicv2_cpuif_enable();
	gicv2_pcpu_distif_init();
	trace_power_flow(mpidr, CPU_UP);
}
601#endif
developer65014b82015-04-13 14:47:57 +0800602
603/*******************************************************************************
604 * MTK_platform handler called when an affinity instance has just been powered
605 * on after having been suspended earlier. The level and mpidr determine the
606 * affinity instance.
607 ******************************************************************************/
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800608#if ENABLE_PLAT_COMPAT
/*
 * Finish resuming the calling core after a suspend. Undoes the work of
 * plat_affinst_suspend() in reverse: system-level wakeup (GIC re-init,
 * SPM suspend finish, SCU on), cluster coherency, per-cpu timer context,
 * MCDI on-state, and finally the per-cpu GIC distributor state.
 */
static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	if (afflvl >= MPIDR_AFFLVL2) {
		/* Enable the gic cpu interface */
		plat_arm_gic_init();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_restore_context(mpidr);

	/* Below system level, let the MCDI driver finish the on state */
	if (afflvl < MPIDR_AFFLVL2)
		spm_mcdi_finish_for_on_state(mpidr, afflvl);

	gicv2_pcpu_distif_init();
}
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800638#else
/*
 * Finish resuming the calling core after a suspend. A core that only
 * entered retention needs no restore work. Otherwise this undoes
 * plat_power_domain_suspend(): system wakeup (GIC re-init, SPM suspend
 * finish, SCU on), cluster coherency, timer context, MCDI on-state and
 * per-cpu GIC distributor state.
 */
static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Retention state: nothing was lost, nothing to restore */
	if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
		return;

	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable the gic cpu interface */
		plat_arm_gic_init();
		spm_system_suspend_finish();
		enable_scu(mpidr);
	}

	/* Perform the common cluster specific operations */
	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	mt_platform_restore_context(mpidr);

	/* MCDI handled the off states when the system stayed up */
	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
		spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
			spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
	}

	gicv2_pcpu_distif_init();
}
669#endif
developer65014b82015-04-13 14:47:57 +0800670
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800671#if ENABLE_PLAT_COMPAT
/* Return the composite power state used for SYSTEM_SUSPEND. */
static unsigned int plat_get_sys_suspend_power_state(void)
{
	/* StateID: 0, StateType: 1(power down), PowerLevel: 2(system) */
	return psci_make_powerstate(0, 1, 2);
}
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800677#else
/*
 * Report the deepest state for SYSTEM_SUSPEND: every power level,
 * from the core up to the system domain, is fully powered off.
 */
static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	assert(PLAT_MAX_PWR_LVL >= 2);

	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
}
685#endif
developer65014b82015-04-13 14:47:57 +0800686
687/*******************************************************************************
688 * MTK handlers to shutdown/reboot the system
689 ******************************************************************************/
/*
 * Power the platform off by asking the RTC to cut power via BBPU.
 * The wfi should never return; falling through to panic() means the
 * power-down did not take effect.
 */
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");

	rtc_bbpu_power_down();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}
700
/*
 * Reset the platform by triggering the watchdog: switch the WDT out of
 * dual/IRQ mode, enable the external reset output, then fire a software
 * reset. The wfi should never return.
 */
static void __dead2 plat_system_reset(void)
{
	/* Write the System Configuration Control Register */
	INFO("MTK System Reset\n");

	mmio_clrsetbits_32(MTK_WDT_BASE,
		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
		MTK_WDT_MODE_KEY);
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}
716
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800717#if !ENABLE_PLAT_COMPAT
Koan-Sin Tan22ea87c2016-04-18 14:28:03 +0800718#if !PSCI_EXTENDED_STATE_ID
/*
 * Validate a PSCI power_state (legacy, non-extended state-id format) and
 * populate req_state. Standby is accepted only at power level 0; power
 * down marks every level up to the requested one as off. The state id
 * field must be zero. Returns PSCI_E_SUCCESS or PSCI_E_INVALID_PARAMS.
 */
static int plat_validate_power_state(unsigned int power_state,
				psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only on power level 0
		 * Ignore any other power level.
		 */
		if (pwr_lvl != 0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MTK_PWR_LVL0] =
					MTK_LOCAL_STATE_RET;
	} else {
		for (i = 0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
					MTK_LOCAL_STATE_OFF;
	}

	/*
	 * We expect the 'state id' to be zero.
	 */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
756#else
/*
 * Validate a PSCI power_state (extended state-id format) against the
 * mtk_pm_idle_states table and unpack its per-level local states into
 * req_state. Returns PSCI_E_SUCCESS or PSCI_E_INVALID_PARAMS.
 */
int plat_validate_power_state(unsigned int power_state,
				psci_power_state_t *req_state)
{
	unsigned int state_id;
	int i;

	assert(req_state);

	/*
	 * Currently we are using a linear search for finding the matching
	 * entry in the idle power state array. This can be made a binary
	 * search if the number of entries justify the additional complexity.
	 */
	for (i = 0; !!mtk_pm_idle_states[i]; i++) {
		if (power_state == mtk_pm_idle_states[i])
			break;
	}

	/* Return error if entry not found in the idle state array */
	if (!mtk_pm_idle_states[i])
		return PSCI_E_INVALID_PARAMS;

	i = 0;
	state_id = psci_get_pstate_id(power_state);

	/* Parse the State ID and populate the state info parameter */
	while (state_id) {
		req_state->pwr_domain_state[i++] = state_id &
						MTK_LOCAL_PSTATE_MASK;
		state_id >>= MTK_LOCAL_PSTATE_WIDTH;
	}

	return PSCI_E_SUCCESS;
}
Koan-Sin Tan22ea87c2016-04-18 14:28:03 +0800791#endif
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800792
/*
 * Resume path after the whole system power domain was off: bring the
 * console back up and re-initialize the GIC (distributor + cpu
 * interface) before normal per-cpu wakeup continues.
 */
void mtk_system_pwr_domain_resume(void)
{
	console_init(MT8173_UART0_BASE, MT8173_UART_CLOCK, MT8173_BAUDRATE);

	/* Assert system power domain is available on the platform */
	assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);

	plat_arm_gic_init();
}
802#endif
803
804#if ENABLE_PLAT_COMPAT
/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 * (legacy platform-compat affinity-instance interface).
 ******************************************************************************/
static const plat_pm_ops_t plat_plat_pm_ops = {
	.affinst_standby		= plat_affinst_standby,
	.affinst_on			= plat_affinst_on,
	.affinst_off			= plat_affinst_off,
	.affinst_suspend		= plat_affinst_suspend,
	.affinst_on_finish		= plat_affinst_on_finish,
	.affinst_suspend_finish		= plat_affinst_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};
819
/*******************************************************************************
 * Export the platform specific power ops & initialize the mtk_platform power
 * controller. Called by the PSCI compat layer; always succeeds.
 ******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &plat_plat_pm_ops;
	return 0;
}
Koan-Sin Tanbc998072017-01-19 16:43:49 +0800829#else
/* PSCI platform handlers (current, non-compat interface). */
static const plat_psci_ops_t plat_plat_pm_ops = {
	.cpu_standby			= plat_cpu_standby,
	.pwr_domain_on			= plat_power_domain_on,
	.pwr_domain_on_finish		= plat_power_domain_on_finish,
	.pwr_domain_off			= plat_power_domain_off,
	.pwr_domain_suspend		= plat_power_domain_suspend,
	.pwr_domain_suspend_finish	= plat_power_domain_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.validate_power_state		= plat_validate_power_state,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};
842
/*
 * Export the PSCI ops and record the warm-boot entry point used by
 * plat_power_domain_on()/plat_power_domain_suspend(). Always succeeds.
 */
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	*psci_ops = &plat_plat_pm_ops;
	secure_entrypoint = sec_entrypoint;
	return 0;
}
850
851/*
852 * The PSCI generic code uses this API to let the platform participate in state
853 * coordination during a power management operation. It compares the platform
854 * specific local power states requested by each cpu for a given power domain
855 * and returns the coordinated target power state that the domain should
856 * enter. A platform assigns a number to a local power state. This default
857 * implementation assumes that the platform assigns these numbers in order of
858 * increasing depth of the power state i.e. for two power states X & Y, if X < Y
859 * then X represents a shallower power state than Y. As a result, the
860 * coordinated target local power state for a power domain will be the minimum
861 * of the requested local power states.
862 */
863plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
864 const plat_local_state_t *states,
865 unsigned int ncpu)
866{
867 plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
868
869 assert(ncpu);
870
871 do {
872 temp = *states++;
873 if (temp < target)
874 target = temp;
875 } while (--ncpu);
876
877 return target;
878}
879#endif