/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <arm_gic.h>
#include <assert.h>
#include <bakery_lock.h>
#include <cci.h>
#include <console.h>
#include <debug.h>
#include <errno.h>
#include <mcucfg.h>
#include <mmio.h>
#include <platform_def.h>
#include <plat_private.h>
#include <power_tracer.h>
#include <psci.h>
#include <scu.h>

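/*
 * Per-core saved context. 'timer_data' holds the generic timer state saved
 * by mt_save_generic_timer() as three 16-byte pairs:
 * {cntkctl_el1, cntp_cval_el0}, {cntp_tval_el0, cntp_ctl_el0} and
 * {cntv_tval_el0, cntv_ctl_el0}.
 */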
struct core_context {
        unsigned long timer_data[8];
        unsigned int count;
        unsigned int rst;
        unsigned int abt;
        unsigned int brk;
};

struct cluster_context {
        struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi-cluster system
 */
struct system_context {
        struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Single instance of the top level structure, encapsulating the saved
 * context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
        struct system_context *system,
        uint32_t clusterid)
{
        return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
                                                uint32_t cpuid)
{
        return &cluster->core[cpuid];
}

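/* Map an MPIDR to the saved context of its cluster (affinity level 1). */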
static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
        uint32_t clusterid;

        clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

        return system_cluster(dormant_data, clusterid);
}

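/* Map an MPIDR to the saved context of its cpu within the cluster. */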
static struct core_context *get_core_data(unsigned long mpidr)
{
        struct cluster_context *cluster;
        uint32_t cpuid;

        cluster = get_cluster_data(mpidr);
        cpuid = mpidr & MPIDR_CPU_MASK;

        return cluster_core(cluster, cpuid);
}

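/*
 * Save the generic timer registers into 'container', in the layout described
 * for struct core_context above.
 */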
static void mt_save_generic_timer(unsigned long *container)
{
        uint64_t ctl;
        uint64_t val;

        __asm__ volatile("mrs %x0, cntkctl_el1\n\t"
                         "mrs %x1, cntp_cval_el0\n\t"
                         "stp %x0, %x1, [%2, #0]"
                         : "=&r" (ctl), "=&r" (val)
                         : "r" (container)
                         : "memory");

        __asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
                         "mrs %x1, cntp_ctl_el0\n\t"
                         "stp %x0, %x1, [%2, #16]"
                         : "=&r" (val), "=&r" (ctl)
                         : "r" (container)
                         : "memory");

        __asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
                         "mrs %x1, cntv_ctl_el0\n\t"
                         "stp %x0, %x1, [%2, #32]"
                         : "=&r" (val), "=&r" (ctl)
                         : "r" (container)
                         : "memory");
}

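/* Restore the generic timer registers saved by mt_save_generic_timer(). */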
static void mt_restore_generic_timer(unsigned long *container)
{
        uint64_t ctl;
        uint64_t val;

        __asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
                         "msr cntkctl_el1, %x0\n\t"
                         "msr cntp_cval_el0, %x1"
                         : "=&r" (ctl), "=&r" (val)
                         : "r" (container)
                         : "memory");

        __asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
                         "msr cntp_tval_el0, %x0\n\t"
                         "msr cntp_ctl_el0, %x1"
                         : "=&r" (val), "=&r" (ctl)
                         : "r" (container)
                         : "memory");

        __asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
                         "msr cntv_tval_el0, %x0\n\t"
                         "msr cntv_ctl_el0, %x1"
                         : "=&r" (val), "=&r" (ctl)
                         : "r" (container)
                         : "memory");
}

static void stop_generic_timer(void)
{
        /*
         * Disable the timer and mask the irq to prevent
         * spurious interrupts on this cpu interface. It
         * will bite us when we come back if we don't. It
         * will be replayed on the inbound cluster.
         */
        uint64_t cntpctl = read_cntp_ctl_el0();

        write_cntp_ctl_el0(clr_cntp_ctl_enable(cntpctl));
}

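/*
 * Save this cpu's context before power down. Currently only the generic
 * timer state is saved.
 */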
static void mt_cpu_save(unsigned long mpidr)
{
        struct core_context *core;

        core = get_core_data(mpidr);
        mt_save_generic_timer(core->timer_data);

        /* Disable the timer irq; the upper layer is expected to re-enable it. */
        stop_generic_timer();
}

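/* Restore the per-cpu context saved by mt_cpu_save(). */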
static void mt_cpu_restore(unsigned long mpidr)
{
        struct core_context *core;

        core = get_core_data(mpidr);
        mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
        /* mcusys_save_context: */
        mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
        /* mcusys_restore_context: */
        mt_cpu_restore(mpidr);
}

/*******************************************************************************
 * Private function which is used to determine if any platform actions
 * should be performed for the specified affinity instance given its
 * state. Nothing needs to be done if the 'state' is not off or if this is not
 * the highest affinity level which will enter the 'state'.
 ******************************************************************************/
static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
{
        unsigned int max_phys_off_afflvl;

        assert(afflvl <= MPIDR_AFFLVL2);

        if (state != PSCI_STATE_OFF)
                return -EAGAIN;

        /*
         * Find the highest affinity level which will be suspended and postpone
         * all the platform specific actions until that level is hit.
         */
        max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
        assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
        if (afflvl != max_phys_off_afflvl)
                return -EAGAIN;

        return 0;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to enter
 * standby.
 ******************************************************************************/
static void plat_affinst_standby(unsigned int power_state)
{
        unsigned int target_afflvl;

        /* Sanity check the requested state */
        target_afflvl = psci_get_pstate_afflvl(power_state);

        /*
         * It's possible to enter standby only on affinity level 0 i.e. a cpu
         * on the MTK_platform. Ignore any other affinity level.
         */
        if (target_afflvl == MPIDR_AFFLVL0) {
                /*
                 * Enter standby state. dsb is good practice before using wfi
                 * to enter low power states.
                 */
                dsb();
                wfi();
        }
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
static int plat_affinst_on(unsigned long mpidr,
                           unsigned long sec_entrypoint,
                           unsigned int afflvl,
                           unsigned int state)
{
        int rc = PSCI_E_SUCCESS;
        unsigned long cpu_id;
        unsigned long cluster_id;
        uintptr_t rv;

        /*
         * It's possible to turn on only affinity level 0 i.e. a cpu
         * on the MTK_platform. Ignore any other affinity level.
         */
        if (afflvl != MPIDR_AFFLVL0)
                return rc;

        cpu_id = mpidr & MPIDR_CPU_MASK;
        cluster_id = mpidr & MPIDR_CLUSTER_MASK;

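        /*
         * Program the cpu's reset vector address (rv_addr) in MCUCFG with the
         * secure entrypoint, so the core enters it when powered on.
         */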
        if (cluster_id)
                rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
        else
                rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

        mmio_write_32(rv, sec_entrypoint);
        INFO("mt_on[%lu:%lu], entry %x\n",
             cluster_id, cpu_id, mmio_read_32(rv));

        return rc;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take appropriate actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It is wise
 * to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
static void plat_affinst_off(unsigned int afflvl, unsigned int state)
{
        unsigned long mpidr = read_mpidr_el1();

        /* Determine if any platform actions need to be executed. */
        if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
                return;

        /* Prevent interrupts from spuriously waking up this cpu */
        plat_mt_gic_cpuif_disable();

        trace_power_flow(mpidr, CPU_DOWN);

        if (afflvl != MPIDR_AFFLVL0) {
                /* Disable coherency if this cluster is to be turned off */
                plat_cci_disable();

                trace_power_flow(mpidr, CLUSTER_DOWN);
        }
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take appropriate actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It is wise
 * to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
static void plat_affinst_suspend(unsigned long sec_entrypoint,
                                 unsigned int afflvl,
                                 unsigned int state)
{
        unsigned long mpidr = read_mpidr_el1();
        unsigned long cluster_id;
        unsigned long cpu_id;
        uintptr_t rv;

        /* Determine if any platform actions need to be executed. */
        if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
                return;

        cpu_id = mpidr & MPIDR_CPU_MASK;
        cluster_id = mpidr & MPIDR_CLUSTER_MASK;

        if (cluster_id)
                rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
        else
                rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

        mmio_write_32(rv, sec_entrypoint);

        if (afflvl >= MPIDR_AFFLVL0)
                mt_platform_save_context(mpidr);

        /* Perform the common cluster specific operations */
        if (afflvl >= MPIDR_AFFLVL1) {
                /* Disable coherency if this cluster is to be turned off */
                plat_cci_disable();
                disable_scu(mpidr);

                trace_power_flow(mpidr, CLUSTER_SUSPEND);
        }

        if (afflvl >= MPIDR_AFFLVL2) {
                /* Prevent interrupts from spuriously waking up this cpu */
                plat_mt_gic_cpuif_disable();
        }
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
{
        unsigned long mpidr = read_mpidr_el1();

        /* Determine if any platform actions need to be executed. */
        if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
                return;

        /* Perform the common cluster specific operations */
        if (afflvl >= MPIDR_AFFLVL1) {
                enable_scu(mpidr);

                /* Enable coherency if this cluster was off */
                plat_cci_enable();
                trace_power_flow(mpidr, CLUSTER_UP);
        }

        /* Enable the gic cpu interface */
        plat_mt_gic_cpuif_enable();
        plat_mt_gic_pcpu_init();
        trace_power_flow(mpidr, CPU_UP);
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
{
        unsigned long mpidr = read_mpidr_el1();

        /* Determine if any platform actions need to be executed. */
        if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
                return;

        if (afflvl >= MPIDR_AFFLVL2) {
                /* Re-initialize the GIC and enable the gic cpu interface */
                plat_mt_gic_init();
                plat_mt_gic_cpuif_enable();
        }

        /* Perform the common cluster specific operations */
        if (afflvl >= MPIDR_AFFLVL1) {
                enable_scu(mpidr);

                /* Enable coherency if this cluster was off */
                plat_cci_enable();
                trace_power_flow(mpidr, CLUSTER_UP);
        }

        if (afflvl >= MPIDR_AFFLVL0)
                mt_platform_restore_context(mpidr);

        plat_mt_gic_pcpu_init();
}

static unsigned int plat_get_sys_suspend_power_state(void)
{
        /* StateID: 0, StateType: 1 (power down), PowerLevel: 2 (system) */
        return psci_make_powerstate(0, 1, 2);
}

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
        INFO("MTK System Off\n");
        wfi();
        ERROR("MTK System Off: operation not handled.\n");
        panic();
}

static void __dead2 plat_system_reset(void)
{
        /* Configure the watchdog and trigger a software reset */
        INFO("MTK System Reset\n");

        mmio_clrbits_32(MTK_WDT_BASE,
                        (MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ));
        mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
        mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

        wfi();
        ERROR("MTK System Reset: operation not handled.\n");
        panic();
}

/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
static const plat_pm_ops_t plat_plat_pm_ops = {
        .affinst_standby = plat_affinst_standby,
        .affinst_on = plat_affinst_on,
        .affinst_off = plat_affinst_off,
        .affinst_suspend = plat_affinst_suspend,
        .affinst_on_finish = plat_affinst_on_finish,
        .affinst_suspend_finish = plat_affinst_suspend_finish,
        .system_off = plat_system_off,
        .system_reset = plat_system_reset,
        .get_sys_suspend_power_state = plat_get_sys_suspend_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops & initialize the mtk_platform power
 * controller
 ******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
        *plat_ops = &plat_plat_pm_ops;
        return 0;
}