/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <arm_gic.h>
#include <assert.h>
#include <bakery_lock.h>
#include <cci.h>
#include <console.h>
#include <debug.h>
#include <errno.h>
#include <gpio.h>
#include <mcucfg.h>
#include <mmio.h>
#include <mt8173_def.h>
#include <mt_cpuxgpt.h> /* generic_timer_backup() */
#include <plat_private.h>
#include <power_tracer.h>
#include <psci.h>
#include <rtc.h>
#include <scu.h>
#include <spm_hotplug.h>
#include <spm_mcdi.h>
#include <spm_suspend.h>

struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Single instance which holds the dormant-state context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
	struct system_context *system,
	uint32_t clusterid)
{
	return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
	uint32_t cpuid)
{
	return &cluster->core[cpuid];
}

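/*
 * MPIDR decoding used below: affinity level 1 (masked by MPIDR_CLUSTER_MASK)
 * selects the cluster and affinity level 0 (MPIDR_CPU_MASK) selects the CPU
 * within that cluster.
 */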
static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
	uint32_t clusterid;

	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
	struct cluster_context *cluster;
	uint32_t cpuid;

	cluster = get_cluster_data(mpidr);
	cpuid = mpidr & MPIDR_CPU_MASK;

	return cluster_core(cluster, cpuid);
}

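/*
 * Each stp below stores one 64-bit register pair, so timer_data holds
 * cntkctl_el1/cntp_cval_el0 at byte offset 0, cntp_tval_el0/cntp_ctl_el0 at
 * offset 16 and cntv_tval_el0/cntv_ctl_el0 at offset 32.
 * mt_restore_generic_timer() reads back the same layout.
 */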
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("mrs %x0, cntkctl_el1\n\t"
			 "mrs %x1, cntp_cval_el0\n\t"
			 "stp %x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
			 "mrs %x1, cntp_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
			 "mrs %x1, cntv_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
			 "msr cntkctl_el1, %x0\n\t"
			 "msr cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
			 "msr cntp_tval_el0, %x0\n\t"
			 "msr cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
			 "msr cntv_tval_el0, %x0\n\t"
			 "msr cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static inline uint64_t read_cntpctl(void)
{
	uint64_t cntpctl;

	__asm__ volatile("mrs %x0, cntp_ctl_el0"
			 : "=r" (cntpctl) : : "memory");

	return cntpctl;
}

static inline void write_cntpctl(uint64_t cntpctl)
{
	__asm__ volatile("msr cntp_ctl_el0, %x0" : : "r"(cntpctl));
}

static void stop_generic_timer(void)
{
	/*
	 * Disable the timer and mask the irq to prevent
	 * spurious interrupts on this cpu interface. It
	 * will bite us when we come back if we don't. It
	 * will be replayed on the inbound cluster.
	 */
	uint64_t cntpctl = read_cntpctl();

	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
}

static void mt_cpu_save(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_save_generic_timer(core->timer_data);

	/* Disable the timer irq; the upper layer should enable it again. */
	stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}

/*******************************************************************************
 * Private function which is used to determine if any platform actions
 * should be performed for the specified affinity instance given its
 * state. Nothing needs to be done if the 'state' is not off or if this is not
 * the highest affinity level which will enter the 'state'.
 ******************************************************************************/
static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
{
	unsigned int max_phys_off_afflvl;

	assert(afflvl <= MPIDR_AFFLVL2);

	if (state != PSCI_STATE_OFF)
		return -EAGAIN;

	/*
	 * Find the highest affinity level which will be suspended and postpone
	 * all the platform specific actions until that level is hit.
	 */
	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
	if (afflvl != max_phys_off_afflvl)
		return -EAGAIN;

	return 0;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to enter
 * standby.
 ******************************************************************************/
static void plat_affinst_standby(unsigned int power_state)
{
	unsigned int target_afflvl;

	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);

	/*
	 * It's possible to enter standby only on affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (target_afflvl == MPIDR_AFFLVL0) {
		/*
		 * Enter standby state. dsb is good practice before using wfi
		 * to enter low power states.
		 */
		dsb();
		wfi();
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
static int plat_affinst_on(unsigned long mpidr,
			   unsigned long sec_entrypoint,
			   unsigned int afflvl,
			   unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	/*
	 * It's possible to turn on only affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (afflvl != MPIDR_AFFLVL0)
		return rc;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

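	/*
	 * Program the warm-boot entry point into the target core's reset
	 * vector address register in MCUCFG (mp0 registers for cluster 0,
	 * mp1 for cluster 1) so the core enters the secure firmware at
	 * sec_entrypoint when it powers up.
	 */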
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
	     cluster_id, cpu_id, mmio_read_32(rv));

	spm_hotplug_on(mpidr);

	return rc;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It would be
 * wise to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
static void plat_affinst_off(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Prevent interrupts from spuriously waking up this cpu */
	arm_gic_cpuif_deactivate();

	spm_hotplug_off(mpidr);

	trace_power_flow(mpidr, CPU_DOWN);

	if (afflvl != MPIDR_AFFLVL0) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It would be
 * wise to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
static void plat_affinst_suspend(unsigned long sec_entrypoint,
				 unsigned int afflvl,
				 unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

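	/* Program the warm-boot entry point, as in plat_affinst_on(). */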
	if (cluster_id)
		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);

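	/*
	 * Power down from the lowest level up: MCDI bookkeeping and timer
	 * context for the CPU (AFFLVL0), SCU and coherency for the cluster
	 * (AFFLVL1), then SPM suspend and GIC deactivation when the whole
	 * system (AFFLVL2) goes down.
	 */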
	if (afflvl == MPIDR_AFFLVL0)
		spm_mcdi_prepare(mpidr);

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
		disable_scu(mpidr);

		trace_power_flow(mpidr, CLUSTER_SUSPEND);
	}

	if (afflvl >= MPIDR_AFFLVL2) {
		generic_timer_backup();
		spm_system_suspend();
		/* Prevent interrupts from spuriously waking up this cpu */
		arm_gic_cpuif_deactivate();
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		enable_scu(mpidr);

		/* Enable coherency if this cluster was off */
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	/* Enable the gic cpu interface */
	arm_gic_cpuif_setup();
	arm_gic_pcpu_distif_setup();
	trace_power_flow(mpidr, CPU_UP);
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

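	/*
	 * Undo the suspend sequence in reverse order: the system level first
	 * (GIC and SPM), then the cluster (SCU and coherency), and finally
	 * the per-CPU timer context and MCDI state.
	 */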
	if (afflvl >= MPIDR_AFFLVL2) {
		/* Enable the gic cpu interface */
		arm_gic_setup();
		arm_gic_cpuif_setup();
		spm_system_suspend_finish();
	}

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		enable_scu(mpidr);

		/* Enable coherency if this cluster was off */
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_restore_context(mpidr);

	if (afflvl == MPIDR_AFFLVL0)
		spm_mcdi_finish(mpidr);

	arm_gic_pcpu_distif_setup();
}

static unsigned int plat_get_sys_suspend_power_state(void)
{
	/* StateID: 0, StateType: 1(power down), PowerLevel: 2(system) */
	return psci_make_powerstate(0, 1, 2);
}

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");

	gpio_set(GPIO120, GPIO_OUT_ZERO);
	rtc_bbpu_power_down();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

static void __dead2 plat_system_reset(void)
{
	/* Reset the whole system through the watchdog */
	INFO("MTK System Reset\n");

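	/*
	 * Clear dual mode and IRQ mode so the watchdog resets the system
	 * rather than raising an interrupt, enable the external reset signal,
	 * then kick the software reset register. MTK_WDT_MODE_KEY is included
	 * in both mode writes to unlock the mode register.
	 */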
	mmio_clrsetbits_32(MTK_WDT_BASE,
			   (MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
			   MTK_WDT_MODE_KEY);
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}

/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
static const plat_pm_ops_t plat_plat_pm_ops = {
	.affinst_standby = plat_affinst_standby,
	.affinst_on = plat_affinst_on,
	.affinst_off = plat_affinst_off,
	.affinst_suspend = plat_affinst_suspend,
	.affinst_on_finish = plat_affinst_on_finish,
	.affinst_suspend_finish = plat_affinst_suspend_finish,
	.system_off = plat_system_off,
	.system_reset = plat_system_reset,
	.get_sys_suspend_power_state = plat_get_sys_suspend_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops & initialize the mtk_platform power
 * controller
 ******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &plat_plat_pm_ops;
	return 0;
}