/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
7#include <arch_helpers.h>
developer550bf5e2016-07-11 16:05:23 +08008#include <assert.h>
9#include <bakery_lock.h>
10#include <cci.h>
11#include <console.h>
12#include <debug.h>
13#include <errno.h>
14#include <mcucfg.h>
15#include <mmio.h>
developer550bf5e2016-07-11 16:05:23 +080016#include <plat_private.h>
Isla Mitchelle3631462017-07-14 10:46:32 +010017#include <platform_def.h>
developer550bf5e2016-07-11 16:05:23 +080018#include <power_tracer.h>
19#include <psci.h>
20#include <scu.h>
21
/*
 * Per-CPU state preserved across a power down.
 * timer_data receives the generic timer registers saved by
 * mt_save_generic_timer() (three 16-byte pairs, 48 of the 64 bytes used).
 * count/rst/abt/brk are not written in this file — presumably event
 * counters maintained elsewhere; TODO(review): confirm their users.
 */
struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};
29
/* Per-cluster context: one core_context slot per CPU in the cluster. */
struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};
33
/*
 * Top level structure to hold the complete context of a multi cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};
40
/*
 * Top level structure which encapsulates the context of the entire system.
 * Declared as a one-element array so it can be passed directly as a
 * pointer to system_cluster().
 */
static struct system_context dormant_data[1];
45
46static inline struct cluster_context *system_cluster(
47 struct system_context *system,
48 uint32_t clusterid)
49{
50 return &system->cluster[clusterid];
51}
52
53static inline struct core_context *cluster_core(struct cluster_context *cluster,
54 uint32_t cpuid)
55{
56 return &cluster->core[cpuid];
57}
58
59static struct cluster_context *get_cluster_data(unsigned long mpidr)
60{
61 uint32_t clusterid;
62
63 clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
64
65 return system_cluster(dormant_data, clusterid);
66}
67
68static struct core_context *get_core_data(unsigned long mpidr)
69{
70 struct cluster_context *cluster;
71 uint32_t cpuid;
72
73 cluster = get_cluster_data(mpidr);
74 cpuid = mpidr & MPIDR_CPU_MASK;
75
76 return cluster_core(cluster, cpuid);
77}
78
/*
 * Save the per-cpu generic timer registers into 'container'
 * (core_context.timer_data): three register pairs stored at byte
 * offsets 0, 16 and 32 — 48 of the 64 available bytes are used.
 */
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	/* [0]: CNTKCTL_EL1, [8]: CNTP_CVAL_EL0 */
	__asm__ volatile("mrs %x0, cntkctl_el1\n\t"
			 "mrs %x1, cntp_cval_el0\n\t"
			 "stp %x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	/* [16]: CNTP_TVAL_EL0, [24]: CNTP_CTL_EL0 */
	__asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
			 "mrs %x1, cntp_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	/* [32]: CNTV_TVAL_EL0, [40]: CNTV_CTL_EL0 */
	__asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
			 "mrs %x1, cntv_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}
105
/*
 * Restore the per-cpu generic timer registers from 'container',
 * in the same layout written by mt_save_generic_timer() (pairs at
 * byte offsets 0, 16 and 32).
 */
static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	/* [0]: CNTKCTL_EL1, [8]: CNTP_CVAL_EL0 */
	__asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
			 "msr cntkctl_el1, %x0\n\t"
			 "msr cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	/* [16]: CNTP_TVAL_EL0, [24]: CNTP_CTL_EL0 */
	__asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
			 "msr cntp_tval_el0, %x0\n\t"
			 "msr cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	/* [32]: CNTV_TVAL_EL0, [40]: CNTV_CTL_EL0 */
	__asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
			 "msr cntv_tval_el0, %x0\n\t"
			 "msr cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}
132
/*
 * Clear the enable bit in CNTP_CTL_EL0 so no spurious timer
 * interrupts hit this cpu interface while it is down. The event
 * will be replayed on the inbound cluster.
 */
static void stop_generic_timer(void)
{
	write_cntp_ctl_el0(clr_cntp_ctl_enable(read_cntp_ctl_el0()));
}
145
146static void mt_cpu_save(unsigned long mpidr)
147{
148 struct core_context *core;
149
150 core = get_core_data(mpidr);
151 mt_save_generic_timer(core->timer_data);
152
153 /* disable timer irq, and upper layer should enable it again. */
154 stop_generic_timer();
155}
156
157static void mt_cpu_restore(unsigned long mpidr)
158{
159 struct core_context *core;
160
161 core = get_core_data(mpidr);
162 mt_restore_generic_timer(core->timer_data);
163}
164
/* mcusys save hook: currently only per-cpu state is saved. */
static void mt_platform_save_context(unsigned long mpidr)
{
	mt_cpu_save(mpidr);
}
170
/* mcusys restore hook: currently only per-cpu state is restored. */
static void mt_platform_restore_context(unsigned long mpidr)
{
	mt_cpu_restore(mpidr);
}
176
177/*******************************************************************************
178* Private function which is used to determine if any platform actions
179* should be performed for the specified affinity instance given its
180* state. Nothing needs to be done if the 'state' is not off or if this is not
181* the highest affinity level which will enter the 'state'.
182*******************************************************************************/
183static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
184{
185 unsigned int max_phys_off_afflvl;
186
187 assert(afflvl <= MPIDR_AFFLVL2);
188
189 if (state != PSCI_STATE_OFF)
190 return -EAGAIN;
191
192 /*
193 * Find the highest affinity level which will be suspended and postpone
194 * all the platform specific actions until that level is hit.
195 */
196 max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
197 assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
198 if (afflvl != max_phys_off_afflvl)
199 return -EAGAIN;
200
201 return 0;
202}
203
204/*******************************************************************************
205 * MTK_platform handler called when an affinity instance is about to enter
206 * standby.
207 ******************************************************************************/
208static void plat_affinst_standby(unsigned int power_state)
209{
210 unsigned int target_afflvl;
211
212 /* Sanity check the requested state */
213 target_afflvl = psci_get_pstate_afflvl(power_state);
214
215 /*
216 * It's possible to enter standby only on affinity level 0 i.e. a cpu
217 * on the MTK_platform. Ignore any other affinity level.
218 */
219 if (target_afflvl == MPIDR_AFFLVL0) {
220 /*
221 * Enter standby state. dsb is good practice before using wfi
222 * to enter low power states.
223 */
224 dsb();
225 wfi();
226 }
227}
228
229/*******************************************************************************
230 * MTK_platform handler called when an affinity instance is about to be turned
231 * on. The level and mpidr determine the affinity instance.
232 ******************************************************************************/
233static int plat_affinst_on(unsigned long mpidr,
234 unsigned long sec_entrypoint,
235 unsigned int afflvl,
236 unsigned int state)
237{
238 int rc = PSCI_E_SUCCESS;
239 unsigned long cpu_id;
240 unsigned long cluster_id;
241 uintptr_t rv;
242
243 /*
244 * It's possible to turn on only affinity level 0 i.e. a cpu
245 * on the MTK_platform. Ignore any other affinity level.
246 */
247 if (afflvl != MPIDR_AFFLVL0)
248 return rc;
249
250 cpu_id = mpidr & MPIDR_CPU_MASK;
251 cluster_id = mpidr & MPIDR_CLUSTER_MASK;
252
253 if (cluster_id)
254 rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
255 else
256 rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
257
258 mmio_write_32(rv, sec_entrypoint);
259 INFO("mt_on[%ld:%ld], entry %x\n",
260 cluster_id, cpu_id, mmio_read_32(rv));
261
262 return rc;
263}
264
265/*******************************************************************************
266 * MTK_platform handler called when an affinity instance is about to be turned
267 * off. The level and mpidr determine the affinity instance. The 'state' arg.
268 * allows the platform to decide whether the cluster is being turned off and
269 * take apt actions.
270 *
271 * CAUTION: This function is called with coherent stacks so that caches can be
272 * turned off, flushed and coherency disabled. There is no guarantee that caches
273 * will remain turned on across calls to this function as each affinity level is
274 * dealt with. So do not write & read global variables across calls. It will be
275 * wise to do flush a write to the global to prevent unpredictable results.
276 ******************************************************************************/
277static void plat_affinst_off(unsigned int afflvl, unsigned int state)
278{
279 unsigned long mpidr = read_mpidr_el1();
280
281 /* Determine if any platform actions need to be executed. */
282 if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
283 return;
284
285 /* Prevent interrupts from spuriously waking up this cpu */
286 plat_mt_gic_cpuif_disable();
287
288 trace_power_flow(mpidr, CPU_DOWN);
289
290 if (afflvl != MPIDR_AFFLVL0) {
291 /* Disable coherency if this cluster is to be turned off */
292 plat_cci_disable();
293
294 trace_power_flow(mpidr, CLUSTER_DOWN);
295 }
296}
297
298/*******************************************************************************
299 * MTK_platform handler called when an affinity instance is about to be
300 * suspended. The level and mpidr determine the affinity instance. The 'state'
301 * arg. allows the platform to decide whether the cluster is being turned off
302 * and take apt actions.
303 *
304 * CAUTION: This function is called with coherent stacks so that caches can be
305 * turned off, flushed and coherency disabled. There is no guarantee that caches
306 * will remain turned on across calls to this function as each affinity level is
307 * dealt with. So do not write & read global variables across calls. It will be
308 * wise to do flush a write to the global to prevent unpredictable results.
309 ******************************************************************************/
310static void plat_affinst_suspend(unsigned long sec_entrypoint,
311 unsigned int afflvl,
312 unsigned int state)
313{
314 unsigned long mpidr = read_mpidr_el1();
315 unsigned long cluster_id;
316 unsigned long cpu_id;
317 uintptr_t rv;
318
319 /* Determine if any platform actions need to be executed. */
320 if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
321 return;
322
323 cpu_id = mpidr & MPIDR_CPU_MASK;
324 cluster_id = mpidr & MPIDR_CLUSTER_MASK;
325
326 if (cluster_id)
327 rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
328 else
329 rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
330
331 mmio_write_32(rv, sec_entrypoint);
332
333 if (afflvl >= MPIDR_AFFLVL0)
334 mt_platform_save_context(mpidr);
335
336 /* Perform the common cluster specific operations */
337 if (afflvl >= MPIDR_AFFLVL1) {
338 /* Disable coherency if this cluster is to be turned off */
339 plat_cci_disable();
340 disable_scu(mpidr);
341
342 trace_power_flow(mpidr, CLUSTER_SUSPEND);
343 }
344
345 if (afflvl >= MPIDR_AFFLVL2) {
346 /* Prevent interrupts from spuriously waking up this cpu */
347 plat_mt_gic_cpuif_disable();
348 }
349}
350
351/*******************************************************************************
352 * MTK_platform handler called when an affinity instance has just been powered
353 * on after being turned off earlier. The level and mpidr determine the affinity
354 * instance. The 'state' arg. allows the platform to decide whether the cluster
355 * was turned off prior to wakeup and do what's necessary to setup it up
356 * correctly.
357 ******************************************************************************/
358static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
359{
360 unsigned long mpidr = read_mpidr_el1();
361
362 /* Determine if any platform actions need to be executed. */
363 if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
364 return;
365
366 /* Perform the common cluster specific operations */
367 if (afflvl >= MPIDR_AFFLVL1) {
368 enable_scu(mpidr);
369
370 /* Enable coherency if this cluster was off */
371 plat_cci_enable();
372 trace_power_flow(mpidr, CLUSTER_UP);
373 }
374
375 /* Enable the gic cpu interface */
376 plat_mt_gic_cpuif_enable();
377 plat_mt_gic_pcpu_init();
378 trace_power_flow(mpidr, CPU_UP);
379}
380
381/*******************************************************************************
382 * MTK_platform handler called when an affinity instance has just been powered
383 * on after having been suspended earlier. The level and mpidr determine the
384 * affinity instance.
385 ******************************************************************************/
386static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
387{
388 unsigned long mpidr = read_mpidr_el1();
389
390 /* Determine if any platform actions need to be executed. */
391 if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
392 return;
393
394 if (afflvl >= MPIDR_AFFLVL2) {
395 /* Enable the gic cpu interface */
396 plat_mt_gic_init();
397 plat_mt_gic_cpuif_enable();
398 }
399
400 /* Perform the common cluster specific operations */
401 if (afflvl >= MPIDR_AFFLVL1) {
402 enable_scu(mpidr);
403
404 /* Enable coherency if this cluster was off */
405 plat_cci_enable();
406 trace_power_flow(mpidr, CLUSTER_UP);
407 }
408
409 if (afflvl >= MPIDR_AFFLVL0)
410 mt_platform_restore_context(mpidr);
411
412 plat_mt_gic_pcpu_init();
413}
414
/* Build the power state used for system suspend. */
static unsigned int plat_get_sys_suspend_power_state(void)
{
	const unsigned int state_id = 0;
	const unsigned int state_type = 1;	/* power down */
	const unsigned int power_level = 2;	/* system */

	return psci_make_powerstate(state_id, state_type, power_level);
}
420
/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
/*
 * No hardware power-off mechanism is implemented here: log, park the cpu
 * in wfi, and panic if execution ever resumes.
 */
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");
	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}
431
/*
 * Reboot via the MTK watchdog: clear dual-mode/irq bits, enable external
 * reset output, then request a software reset — presumably the *_KEY
 * values unlock the watchdog registers (TODO confirm against mcucfg/WDT
 * register docs). Execution should never continue past the wfi.
 */
static void __dead2 plat_system_reset(void)
{
	/* Write the System Configuration Control Register */
	INFO("MTK System Reset\n");

	mmio_clrbits_32(MTK_WDT_BASE,
		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ));
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	/* The reset takes effect asynchronously; wait for it here. */
	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}
446
/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
/* PSCI affinity-instance dispatch table for this platform. */
static const plat_pm_ops_t plat_plat_pm_ops = {
	.affinst_standby = plat_affinst_standby,
	.affinst_on = plat_affinst_on,
	.affinst_off = plat_affinst_off,
	.affinst_suspend = plat_affinst_suspend,
	.affinst_on_finish = plat_affinst_on_finish,
	.affinst_suspend_finish = plat_affinst_suspend_finish,
	.system_off = plat_system_off,
	.system_reset = plat_system_reset,
	.get_sys_suspend_power_state = plat_get_sys_suspend_power_state,
};
461
/*******************************************************************************
 * Export the platform specific power ops & initialize the mtk_platform power
 * controller
 ******************************************************************************/
/* Hand the PSCI framework this platform's pm ops table; always returns 0. */
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &plat_plat_pm_ops;
	return 0;
}