/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bakery_lock.h>
#include <bl31.h>
#include <console.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <plat_private.h>
#include <platform.h>
#include <platform_def.h>
#include <pmu.h>
#include <pmu_com.h>
#include <rk3328_def.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static struct rk3328_sleep_ddr_data ddr_data;
static __sramdata struct rk3328_sleep_sram_data sram_data;

static uint32_t cpu_warm_boot_addr;

#pragma weak rk3328_pmic_suspend
#pragma weak rk3328_pmic_resume

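/*
 * Read back how a core was configured to go down: core_pwr_pd if its
 * power-domain bit is set and auto power management is disabled,
 * core_pwr_wfi if auto power management is enabled instead. Any other
 * combination is treated as a fatal inconsistency.
 */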
static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
        uint32_t pd_reg, apm_reg;

        pd_reg = mmio_read_32(PMU_BASE + PMU_PWRDN_CON) & BIT(cpu_id);
        apm_reg = mmio_read_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id)) &
                  BIT(core_pm_en);

        if (pd_reg && !apm_reg)
                return core_pwr_pd;
        else if (!pd_reg && apm_reg)
                return core_pwr_wfi;

        ERROR("%s: 0x%x, 0x%x\n", __func__, pd_reg, apm_reg);
        while (1)
                ;
}

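/*
 * Bring a core's power domain up. For core_pwr_pd the domain is cycled
 * through the PMU directly; otherwise a software wakeup is requested
 * via the core's auto power-management register.
 */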
static int cpus_power_domain_on(uint32_t cpu_id)
{
        uint32_t cpu_pd, cfg_info;

        cpu_pd = PD_CPU0 + cpu_id;
        cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

        if (cfg_info == core_pwr_pd) {
                /* disable apm cfg */
                mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
                              CORES_PM_DISABLE);

                /* if the core is already on, power it off first */
                if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
                        mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
                                      CORES_PM_DISABLE);
                        pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
                }
                pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
        } else {
                if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
                        WARN("%s: cpu%d is not off!\n", __func__, cpu_id);
                        return -EINVAL;
                }

                mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
                              BIT(core_pm_sft_wakeup_en));
        }

        return 0;
}

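/*
 * Take a core's power domain down. For core_pwr_pd the core must already
 * be in WFI/WFE before the domain is switched off; for the WFI-based modes
 * the auto power-management register is armed instead, optionally keeping
 * interrupt wakeup enabled (core_pwr_wfi_int).
 */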
static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
        uint32_t cpu_pd, core_pm_value;

        cpu_pd = PD_CPU0 + cpu_id;
        if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
                return 0;

        if (pd_cfg == core_pwr_pd) {
                if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
                        return -EINVAL;
                /* disable apm cfg */
                mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
                              CORES_PM_DISABLE);
                pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
        } else {
                core_pm_value = BIT(core_pm_en) | BIT(core_pm_dis_int);
                if (pd_cfg == core_pwr_wfi_int)
                        core_pm_value |= BIT(core_pm_int_wakeup_en);

                mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
                              core_pm_value);
        }

        return 0;
}

static void nonboot_cpus_off(void)
{
        uint32_t boot_cpu, cpu;

        /* turn off the non-boot cpus */
        boot_cpu = plat_my_core_pos();
        for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
                if (cpu == boot_cpu)
                        continue;
                cpus_power_domain_off(cpu, core_pwr_pd);
        }
}

void sram_save(void)
{
        /* TODO: support the sram save for rk3328 SoCs */
}

void sram_restore(void)
{
        /* TODO: support the sram restore for rk3328 SoCs */
}

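/*
 * The rockchip_soc_cores_* hooks below back the per-core PSCI CPU_ON,
 * CPU_OFF and CPU_SUSPEND flows: record the warm-boot entry point and flag
 * for the target core, then drive its power domain through the helpers above.
 */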
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
        uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

        assert(cpu_id < PLATFORM_CORE_COUNT);
        assert(cpuson_flags[cpu_id] == 0);
        cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
        cpuson_entry_point[cpu_id] = entrypoint;
        dsb();

        cpus_power_domain_on(cpu_id);

        return 0;
}

int rockchip_soc_cores_pwr_dm_off(void)
{
        uint32_t cpu_id = plat_my_core_pos();

        cpus_power_domain_off(cpu_id, core_pwr_wfi);

        return 0;
}

int rockchip_soc_cores_pwr_dm_suspend(void)
{
        uint32_t cpu_id = plat_my_core_pos();

        assert(cpu_id < PLATFORM_CORE_COUNT);
        assert(cpuson_flags[cpu_id] == 0);
        cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
        cpuson_entry_point[cpu_id] = (uintptr_t)plat_get_sec_entrypoint();
        dsb();

        cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

        return 0;
}

int rockchip_soc_cores_pwr_dm_on_finish(void)
{
        uint32_t cpu_id = plat_my_core_pos();

        mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);

        return 0;
}

int rockchip_soc_cores_pwr_dm_resume(void)
{
        uint32_t cpu_id = plat_my_core_pos();

        mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);

        return 0;
}

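/*
 * Drop every PLL into slow mode before triggering the first global
 * software reset through CRU_GLB_SRST_FST.
 */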
void __dead2 rockchip_soc_soft_reset(void)
{
        mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(CPLL_ID));
        mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(GPLL_ID));
        mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(NPLL_ID));
        mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(APLL_ID));
        dsb();

        mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE);
        dsb();
        /*
         * The HW may need some time to reset the system, so don't let
         * the core execute any further code while the reset takes effect.
         */
        while (1)
                ;
}

/*
 * For the RK805 PMIC, the sleep pin is connected to gpio2_d2 of the rk3328.
 * If the PMIC is configured to power off in response to the sleep pin,
 * driving the pin high powers the PMIC off.
 */
void __dead2 rockchip_soc_system_off(void)
{
        uint32_t val;

        /* gpio config */
        val = mmio_read_32(GRF_BASE + GRF_GPIO2D_IOMUX);
        val &= ~GPIO2_D2_GPIO_MODE;
        mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, val);

        /* config as output */
        val = mmio_read_32(GPIO2_BASE + SWPORTA_DDR);
        val |= GPIO2_D2;
        mmio_write_32(GPIO2_BASE + SWPORTA_DDR, val);

        /* drive the output high */
        val = mmio_read_32(GPIO2_BASE);
        val |= GPIO2_D2;
        mmio_write_32(GPIO2_BASE, val);
        dsb();

        while (1)
                ;
}

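/*
 * Clock-gate masks for system suspend, one entry per CRU_CLKGATE_CON
 * register: a set bit leaves that clock's gate untouched during suspend,
 * every other clock is gated.
 */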
static uint32_t clk_ungt_msk[CRU_CLKGATE_NUMS] = {
        0x187f, 0x0000, 0x010c, 0x0000, 0x0200,
        0x0010, 0x0000, 0x0017, 0x001f, 0x0000,
        0x0000, 0x0000, 0x0000, 0x0003, 0x0000,
        0xf001, 0x27c0, 0x04d9, 0x03ff, 0x0000,
        0x0000, 0x0000, 0x0010, 0x0000, 0x0000,
        0x0000, 0x0000, 0x0003, 0x0008
};

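/*
 * Save every CRU_CLKGATE_CON register and gate all clocks not listed in
 * ungt_msk; clks_gating_resume() writes the saved values back.
 */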
static void clks_gating_suspend(uint32_t *ungt_msk)
{
        int i;

        for (i = 0; i < CRU_CLKGATE_NUMS; i++) {
                ddr_data.clk_ungt_save[i] =
                        mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(i));
                mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
                              ((~ungt_msk[i]) << 16) | 0xffff);
        }
}

static void clks_gating_resume(void)
{
        int i;

        for (i = 0; i < CRU_CLKGATE_NUMS; i++)
                mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
                              ddr_data.clk_ungt_save[i] | 0xffff0000);
}

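/* Poll the PLL lock bit, giving up after PLL_LOCKED_TIMEOUT iterations. */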
static inline void pm_pll_wait_lock(uint32_t pll_id)
{
        uint32_t delay = PLL_LOCKED_TIMEOUT;

        while (delay > 0) {
                if (mmio_read_32(CRU_BASE + PLL_CONS(pll_id, 1)) &
                    PLL_IS_LOCKED)
                        break;
                delay--;
        }
        if (delay == 0)
                ERROR("lock-pll: %d\n", pll_id);
}

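/*
 * Drive a PLL's power-down state: bit 15 of PLL_CON1 (assumed here to
 * select software control of the power-down) is set first, then bit 14
 * is written according to pd.
 */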
static inline void pll_pwr_dwn(uint32_t pll_id, uint32_t pd)
{
        mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
                      BITS_WITH_WMASK(1, 1, 15));
        if (pd)
                mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
                              BITS_WITH_WMASK(1, 1, 14));
        else
                mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
                              BITS_WITH_WMASK(0, 1, 14));
}

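/*
 * The DPLL feeds the DDR controller, so its suspend/resume helpers must run
 * from SRAM: save the PLL configuration, drop to slow mode and power the PLL
 * down on the way in; power it back up, wait for lock and restore normal
 * mode on the way out.
 */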
static __sramfunc void dpll_suspend(void)
{
        int i;

        /* slow mode */
        mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(DPLL_ID));

        /* save pll con */
        for (i = 0; i < CRU_PLL_CON_NUMS; i++)
                sram_data.dpll_con_save[i] =
                        mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, i));
        mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
                      BITS_WITH_WMASK(1, 1, 15));
        mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
                      BITS_WITH_WMASK(1, 1, 14));
}

static __sramfunc void dpll_resume(void)
{
        uint32_t delay = PLL_LOCKED_TIMEOUT;

        mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
                      BITS_WITH_WMASK(1, 1, 15));
        mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
                      BITS_WITH_WMASK(0, 1, 14));
        mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
                      sram_data.dpll_con_save[1] | 0xc0000000);

        dsb();

        while (delay > 0) {
                if (mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, 1)) &
                    PLL_IS_LOCKED)
                        break;
                delay--;
        }
        if (delay == 0)
                while (1)
                        ;

        mmio_write_32(CRU_BASE + CRU_CRU_MODE,
                      PLL_NORM_MODE(DPLL_ID));
}

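/*
 * Generic suspend/resume for the non-DDR PLLs: save the PLL registers and
 * power the PLL down, then on resume power it up, wait for lock and switch
 * back to normal mode only if it was in normal mode before suspend.
 */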
static inline void pll_suspend(uint32_t pll_id)
{
        int i;

        /* slow mode */
        mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(pll_id));

        /* save pll con */
        for (i = 0; i < CRU_PLL_CON_NUMS; i++)
                ddr_data.cru_plls_con_save[pll_id][i] =
                        mmio_read_32(CRU_BASE + PLL_CONS(pll_id, i));

        /* powerdown pll */
        pll_pwr_dwn(pll_id, pmu_pd_off);
}

static inline void pll_resume(uint32_t pll_id)
{
        mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
                      ddr_data.cru_plls_con_save[pll_id][1] | 0xc0000000);

        pm_pll_wait_lock(pll_id);

        if (PLL_IS_NORM_MODE(ddr_data.cru_mode_save, pll_id))
                mmio_write_32(CRU_BASE + CRU_CRU_MODE,
                              PLL_NORM_MODE(pll_id));
}

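/*
 * System-suspend PLL handling: save the CRU mode and the affected
 * clock-select registers, power down NPLL/CPLL/GPLL/APLL, and reparent or
 * redivide the core, pclk_dbg, crypto, pwm0, uart2 and rtc32k clocks so they
 * keep running from the remaining sources. pm_plls_resume() restores
 * everything in the reverse order.
 */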
static void pm_plls_suspend(void)
{
        ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_CRU_MODE);
        ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(0));
        ddr_data.clk_sel1 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(1));
        ddr_data.clk_sel18 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(18));
        ddr_data.clk_sel20 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(20));
        ddr_data.clk_sel24 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(24));
        ddr_data.clk_sel38 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(38));
        pll_suspend(NPLL_ID);
        pll_suspend(CPLL_ID);
        pll_suspend(GPLL_ID);
        pll_suspend(APLL_ID);

        /* core */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
                      BITS_WITH_WMASK(0, 0x1f, 0));

        /* pclk_dbg */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
                      BITS_WITH_WMASK(0, 0xf, 0));

        /* crypto */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
                      BITS_WITH_WMASK(0, 0x1f, 0));

        /* pwm0 */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
                      BITS_WITH_WMASK(0, 0x7f, 8));

        /* uart2 from 24M */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
                      BITS_WITH_WMASK(2, 0x3, 8));

        /* clk_rtc32k */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
                      BITS_WITH_WMASK(767, 0x3fff, 0) |
                      BITS_WITH_WMASK(2, 0x3, 14));
}

static void pm_plls_resume(void)
{
        /* clk_rtc32k */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
                      ddr_data.clk_sel38 |
                      BITS_WMSK(0x3fff, 0) |
                      BITS_WMSK(0x3, 14));

        /* uart2 */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
                      ddr_data.clk_sel18 | BITS_WMSK(0x3, 8));

        /* pwm0 */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
                      ddr_data.clk_sel24 | BITS_WMSK(0x7f, 8));

        /* crypto */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
                      ddr_data.clk_sel20 | BITS_WMSK(0x1f, 0));

        /* pclk_dbg */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
                      ddr_data.clk_sel1 | BITS_WMSK(0xf, 0));

        /* core */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
                      ddr_data.clk_sel0 | BITS_WMSK(0x1f, 0));

        pll_pwr_dwn(APLL_ID, pmu_pd_on);
        pll_pwr_dwn(GPLL_ID, pmu_pd_on);
        pll_pwr_dwn(CPLL_ID, pmu_pd_on);
        pll_pwr_dwn(NPLL_ID, pmu_pd_on);

        pll_resume(APLL_ID);
        pll_resume(GPLL_ID);
        pll_resume(CPLL_ID);
        pll_resume(NPLL_ID);
}

#define ARCH_TIMER_TICKS_PER_US (SYS_COUNTER_FREQ_IN_TICKS / 1000000)

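/*
 * Busy-wait delay using the generic timer's physical counter; safe to call
 * from SRAM code.
 */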
static __sramfunc void sram_udelay(uint32_t us)
{
        uint64_t pct_orig, pct_now;
        uint64_t to_wait = ARCH_TIMER_TICKS_PER_US * us;

        isb();
        pct_orig = read_cntpct_el0();

        do {
                isb();
                pct_now = read_cntpct_el0();
        } while ((pct_now - pct_orig) <= to_wait);
}

/*
 * For the RK805 PMIC, the sleep pin is connected to gpio2_d2 of the rk3328.
 * If the PMIC is configured to enter sleep mode in response to the sleep
 * pin, driving the pin high puts the PMIC to sleep.
 */
__sramfunc void rk3328_pmic_suspend(void)
{
        sram_data.pmic_sleep_save = mmio_read_32(GRF_BASE + PMIC_SLEEP_REG);
        sram_data.pmic_sleep_gpio_save[1] = mmio_read_32(GPIO2_BASE + 4);
        sram_data.pmic_sleep_gpio_save[0] = mmio_read_32(GPIO2_BASE);
        mmio_write_32(GRF_BASE + PMIC_SLEEP_REG, BITS_WITH_WMASK(0, 0x3, 4));
        mmio_write_32(GPIO2_BASE + 4,
                      sram_data.pmic_sleep_gpio_save[1] | BIT(26));
        mmio_write_32(GPIO2_BASE,
                      sram_data.pmic_sleep_gpio_save[0] | BIT(26));
}

__sramfunc void rk3328_pmic_resume(void)
{
        mmio_write_32(GPIO2_BASE, sram_data.pmic_sleep_gpio_save[0]);
        mmio_write_32(GPIO2_BASE + 4, sram_data.pmic_sleep_gpio_save[1]);
        mmio_write_32(GRF_BASE + PMIC_SLEEP_REG,
                      sram_data.pmic_sleep_save | BITS_WMSK(0xffff, 0));
        /* restoring the supply voltage takes a while */
        sram_udelay(100);
}

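/*
 * Put the DDR subsystem to sleep from SRAM: enter self-refresh, enable DDR
 * retention, gate the DDR-related clocks and power down the DPLL.
 */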
static __sramfunc void ddr_suspend(void)
{
        sram_data.pd_sr_idle_save = mmio_read_32(DDR_UPCTL_BASE +
                                                 DDR_PCTL2_PWRCTL);
        sram_data.pd_sr_idle_save &= SELFREF_EN;

        mmio_clrbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL, SELFREF_EN);
        sram_data.ddr_grf_con0 = mmio_read_32(DDR_GRF_BASE +
                                              DDRGRF_SOC_CON(0));
        mmio_write_32(DDR_GRF_BASE, BIT_WITH_WMSK(14) | WMSK_BIT(15));

        /*
         * Override csysreq from the ddrc and send a valid csysreq signal
         * to the PMU; csysreq is otherwise controlled by the ddrc only.
         */

        /* enter self-refresh */
        mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
        while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
                (0x03 << 12)) != (0x02 << 12))
                ;
        /* ddr retention */
        mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));

        /* ddr gating */
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
                      BITS_WITH_WMASK(0x7, 0x7, 4));
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
                      BITS_WITH_WMASK(1, 1, 4));
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
                      BITS_WITH_WMASK(0x1ff, 0x1ff, 1));
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
                      BITS_WITH_WMASK(0x3, 0x3, 0));

        dpll_suspend();
}

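/*
 * Undo ddr_suspend(): bring the DPLL back, ungate the DDR clocks, release
 * retention, exit self-refresh and restore the saved controller settings.
 */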
__sramfunc void dmc_restore(void)
{
        dpll_resume();

        /* ddr ungating */
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
                      BITS_WITH_WMASK(0, 0x7, 4));
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
                      BITS_WITH_WMASK(0, 1, 4));
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
                      BITS_WITH_WMASK(0, 0x1ff, 1));
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
                      BITS_WITH_WMASK(0, 0x3, 0));

        /* ddr de-retention */
        mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));
        /* exit self-refresh */
        mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
        while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
                (0x03 << 12)) != (0x00 << 12))
                ;

        mmio_write_32(DDR_GRF_BASE, sram_data.ddr_grf_con0 | 0xc0000000);
        if (sram_data.pd_sr_idle_save)
                mmio_setbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL,
                                SELFREF_EN);
}

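/*
 * Quiesce the debug UART (uart2) for suspend: mask its interrupts and gate
 * its clocks; the resume path ungates the clocks, resets the FIFOs and
 * restores the saved interrupt mask.
 */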
static __sramfunc void sram_dbg_uart_suspend(void)
{
        sram_data.uart2_ier = mmio_read_32(UART2_BASE + UART_IER);
        mmio_write_32(UART2_BASE + UART_IER, UART_INT_DISABLE);
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20002000);
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040004);
}

__sramfunc void sram_dbg_uart_resume(void)
{
        /* restore uart clk and reset fifo */
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20000000);
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040000);
        mmio_write_32(UART2_BASE + UART_FCR, UART_FIFO_RESET);
        mmio_write_32(UART2_BASE + UART_IER, sram_data.uart2_ier);
}

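/*
 * Arm CPU0's auto power-management for interrupt wakeup and drop into wfi;
 * the loop only runs again if the wfi falls through unexpectedly.
 */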
static __sramfunc void sram_soc_enter_lp(void)
{
        uint32_t apm_value;

        apm_value = BIT(core_pm_en) |
                    BIT(core_pm_dis_int) |
                    BIT(core_pm_int_wakeup_en);
        mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(PD_CPU0), apm_value);

        dsb();
        isb();
err_loop:
        wfi();
        /*
         * The SoC will enter low power mode here and should not return
         * to this point.
         */
        goto err_loop;
}

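/*
 * Final suspend steps executed from SRAM with the MMU off: redirect the warm
 * boot address to pmu_cpuson_entrypoint, put the DDR into self-refresh,
 * signal the PMIC and quiesce the debug UART, then enter low power mode.
 */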
__sramfunc void sram_suspend(void)
{
        /* disable mmu and icache */
        disable_mmu_icache_el3();
        tlbialle3();
        dsbsy();
        isb();

        mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
                      ((uintptr_t)&pmu_cpuson_entrypoint >> CPU_BOOT_ADDR_ALIGN) |
                      CPU_BOOT_ADDR_WMASK);

        /* ddr self-refresh and gating phy */
        ddr_suspend();

        rk3328_pmic_suspend();

        sram_dbg_uart_suspend();

        sram_soc_enter_lp();
}

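/*
 * System-level suspend entry, called as the final wfi step of system
 * suspend after rockchip_soc_sys_pwr_dm_suspend() has gated the clocks and
 * parked the PLLs.
 */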
void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
{
        sram_suspend();

        /* should never reach here */
        psci_power_down_wfi();
}

int rockchip_soc_sys_pwr_dm_suspend(void)
{
        clks_gating_suspend(clk_ungt_msk);

        pm_plls_suspend();

        return 0;
}

int rockchip_soc_sys_pwr_dm_resume(void)
{
        pm_plls_resume();

        clks_gating_resume();

        plat_rockchip_gic_cpuif_enable();

        return 0;
}

void rockchip_plat_mmu_el3(void)
{
        /* TODO: support the EL3 MMU setup for rk3328 SoCs */
}

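/*
 * Early PMU setup: clear the per-core boot flags, program the warm boot
 * address into SGRF and power off every non-boot core.
 */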
void plat_rockchip_pmu_init(void)
{
        uint32_t cpu;

        for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
                cpuson_flags[cpu] = 0;

        cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

        /* program the warm boot address for the cpus */
        mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
                      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
                      CPU_BOOT_ADDR_WMASK);

        nonboot_cpus_off();

        INFO("%s: pd status 0x%x\n",
             __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}