/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <arm_gic.h>
#include <assert.h>
#include <bakery_lock.h>
#include <cci.h>
#include <console.h>
#include <debug.h>
#include <errno.h>
#include <mcucfg.h>
#include <mmio.h>
#include <platform_def.h>
#include <plat_private.h>
#include <power_tracer.h>
#include <psci.h>
#include <scu.h>

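/*
 * Per-core context saved across a power down. timer_data holds the ARM
 * generic timer registers (see mt_save_generic_timer() for the exact
 * layout); the count/rst/abt/brk fields appear to be bookkeeping counters
 * and are not referenced in this file.
 */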
struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi-cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * The single instance holding the saved context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
	struct system_context *system,
	uint32_t clusterid)
{
	return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
						uint32_t cpuid)
{
	return &cluster->core[cpuid];
}

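/*
 * Look up the saved-context slot for an MPIDR: the cluster index comes
 * from affinity level 1 and the CPU index from affinity level 0.
 */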
static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
	uint32_t clusterid;

	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
	struct cluster_context *cluster;
	uint32_t cpuid;

	cluster = get_cluster_data(mpidr);
	cpuid = mpidr & MPIDR_CPU_MASK;

	return cluster_core(cluster, cpuid);
}

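/*
 * Save the per-core generic timer state. Each stp below stores one
 * register pair at a 16-byte offset, so the layout in 'container'
 * (i.e. core_context.timer_data) is:
 *
 *   [0] cntkctl_el1    [1] cntp_cval_el0
 *   [2] cntp_tval_el0  [3] cntp_ctl_el0
 *   [4] cntv_tval_el0  [5] cntv_ctl_el0
 *
 * Slots 6 and 7 are currently unused.
 */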
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("mrs %x0, cntkctl_el1\n\t"
			 "mrs %x1, cntp_cval_el0\n\t"
			 "stp %x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
			 "mrs %x1, cntp_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
			 "mrs %x1, cntv_ctl_el0\n\t"
			 "stp %x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

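/*
 * Restore the state saved by mt_save_generic_timer(). Note that writing
 * cntp_tval_el0 recomputes the compare value as "now + saved tval", so it
 * supersedes the cntp_cval_el0 value restored just before it.
 */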
static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
			 "msr cntkctl_el1, %x0\n\t"
			 "msr cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
			 "msr cntp_tval_el0, %x0\n\t"
			 "msr cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
			 "msr cntv_tval_el0, %x0\n\t"
			 "msr cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static void stop_generic_timer(void)
{
	/*
	 * Disable the timer so that no spurious interrupt is delivered
	 * to this cpu interface. A pending timer interrupt will bite us
	 * when we come back if we don't; it will be replayed on the
	 * inbound cluster.
	 */
	uint64_t cntpctl = read_cntp_ctl_el0();

	write_cntp_ctl_el0(clr_cntp_ctl_enable(cntpctl));
}

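/* Save this core's generic timer state, then quiesce its timer. */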
static void mt_cpu_save(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_save_generic_timer(core->timer_data);

	/* Disable the timer irq; the upper layer is expected to re-enable it. */
	stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}

/*******************************************************************************
 * Private function which determines if any platform actions should be
 * performed for the specified affinity instance, given its state. Nothing
 * needs to be done if the 'state' is not off, or if this is not the highest
 * affinity level which will enter the 'state'.
 ******************************************************************************/
static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
{
	unsigned int max_phys_off_afflvl;

	assert(afflvl <= MPIDR_AFFLVL2);

	if (state != PSCI_STATE_OFF)
		return -EAGAIN;

	/*
	 * Find the highest affinity level which will be suspended and postpone
	 * all the platform specific actions until that level is hit.
	 */
	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
	if (afflvl != max_phys_off_afflvl)
		return -EAGAIN;

	return 0;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to enter
 * standby.
 ******************************************************************************/
static void plat_affinst_standby(unsigned int power_state)
{
	unsigned int target_afflvl;

	/* Sanity check the requested state. */
	target_afflvl = psci_get_pstate_afflvl(power_state);

	/*
	 * It's possible to enter standby only on affinity level 0, i.e. a CPU
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (target_afflvl == MPIDR_AFFLVL0) {
		/*
		 * Enter standby state. A dsb is good practice before using wfi
		 * to enter low power states.
		 */
		dsb();
		wfi();
	}
}

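/*
 * Cores on this SoC are released from reset at a programmable address:
 * the warm-boot entry point is written to the core's reset-vector
 * register ("rv_addr") in MCUCFG, with the mp0/mp1 banks selecting
 * cluster 0 or cluster 1 respectively.
 */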
/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
static int plat_affinst_on(unsigned long mpidr,
			   unsigned long sec_entrypoint,
			   unsigned int afflvl,
			   unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	/*
	 * It's possible to turn on only affinity level 0, i.e. a CPU
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (afflvl != MPIDR_AFFLVL0)
		return rc;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);
	INFO("mt_on[%ld:%ld], entry %x\n",
	     cluster_id, cpu_id, mmio_read_32(rv));

	return rc;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg
 * allows the platform to decide whether the cluster is being turned off and
 * take appropriate actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It would be
 * wise to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
static void plat_affinst_off(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Prevent interrupts from spuriously waking up this cpu. */
	plat_mt_gic_cpuif_disable();

	trace_power_flow(mpidr, CPU_DOWN);

	if (afflvl != MPIDR_AFFLVL0) {
		/* Disable coherency if this cluster is to be turned off. */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg allows the platform to decide whether the cluster is being turned off
 * and take appropriate actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It would be
 * wise to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
static void plat_affinst_suspend(unsigned long sec_entrypoint,
				 unsigned int afflvl,
				 unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	if (cluster_id)
		rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations. */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Disable coherency if this cluster is to be turned off. */
		plat_cci_disable();
		disable_scu(mpidr);

		trace_power_flow(mpidr, CLUSTER_SUSPEND);
	}

	if (afflvl >= MPIDR_AFFLVL2) {
		/* Prevent interrupts from spuriously waking up this cpu. */
		plat_mt_gic_cpuif_disable();
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Perform the common cluster specific operations. */
	if (afflvl >= MPIDR_AFFLVL1) {
		enable_scu(mpidr);

		/* Enable coherency if this cluster was off. */
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	/* Enable the gic cpu interface. */
	plat_mt_gic_cpuif_enable();
	plat_mt_gic_pcpu_init();
	trace_power_flow(mpidr, CPU_UP);
}

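/*
 * The wakeup path below mirrors plat_affinst_suspend() in reverse:
 * system-level GIC state first (level 2), then cluster coherency
 * (SCU/CCI, level 1), and finally the per-core timer context (level 0).
 */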
/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	if (afflvl >= MPIDR_AFFLVL2) {
		/* Reinitialize the gic and enable the gic cpu interface. */
		plat_mt_gic_init();
		plat_mt_gic_cpuif_enable();
	}

	/* Perform the common cluster specific operations. */
	if (afflvl >= MPIDR_AFFLVL1) {
		enable_scu(mpidr);

		/* Enable coherency if this cluster was off. */
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_restore_context(mpidr);

	plat_mt_gic_pcpu_init();
}

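/*
 * A sketch of the PSCI v0.2 power_state encoding assumed here (per the
 * PSCI spec): StateID in bits[15:0], StateType in bit[16] (1 = power
 * down), target power level in bits[25:24]. The value returned below
 * therefore requests a power-down of the whole system (level 2):
 *
 *	psci_make_powerstate(0, 1, 2) == (1 << 16) | (2 << 24) == 0x02010000
 */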
static unsigned int plat_get_sys_suspend_power_state(void)
{
	/* StateID: 0, StateType: 1 (power down), PowerLevel: 2 (system) */
	return psci_make_powerstate(0, 1, 2);
}

/*******************************************************************************
 * MTK handlers to shut down/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");
	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

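/*
 * Reset the SoC through the MediaTek watchdog: take the WDT out of
 * dual/IRQ mode (presumably so it asserts a full reset rather than an
 * interrupt), enable the external reset signal, then kick the
 * software-reset register.
 */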
static void __dead2 plat_system_reset(void)
{
	INFO("MTK System Reset\n");

	mmio_clrbits_32(MTK_WDT_BASE,
			(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ));
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}

/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
static const plat_pm_ops_t plat_plat_pm_ops = {
	.affinst_standby = plat_affinst_standby,
	.affinst_on = plat_affinst_on,
	.affinst_off = plat_affinst_off,
	.affinst_suspend = plat_affinst_suspend,
	.affinst_on_finish = plat_affinst_on_finish,
	.affinst_suspend_finish = plat_affinst_suspend_finish,
	.system_off = plat_system_off,
	.system_reset = plat_system_reset,
	.get_sys_suspend_power_state = plat_get_sys_suspend_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops & initialize the mtk_platform power
 * controller
 ******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &plat_plat_pm_ops;
	return 0;
}
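
/*
 * Presumably the generic PSCI setup code calls platform_setup_pm() during
 * boot to obtain these handlers, along the lines of the following
 * illustrative sketch (not code from this tree):
 *
 *	const plat_pm_ops_t *pm_ops;
 *
 *	if (platform_setup_pm(&pm_ops) == 0)
 *		assert(pm_ops->affinst_on != NULL);
 */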