/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <debug.h>
#include <assert.h>
#include <bakery_lock.h>
#include <bl31.h>
#include <console.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_private.h>
#include <pmu_sram.h>
#include <pmu.h>
#include <rk3328_def.h>
#include <pmu_com.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;

static struct rk3328_sleep_ddr_data ddr_data;
static __sramdata struct rk3328_sleep_sram_data sram_data;

static uint32_t cpu_warm_boot_addr;

#pragma weak rk3328_pmic_suspend
#pragma weak rk3328_pmic_resume

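/*
 * Copy the CPU warm-boot entry code into the PMU SRAM and record the stack
 * pointer that the SRAM-resident resume path will use.
 */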
void plat_rockchip_pmusram_prepare(void)
{
	uint32_t *sram_dst, *sram_src;
	size_t sram_size = 2;
	/*
	 * Prepare the pmu sram code and data.
	 */
	sram_dst = (uint32_t *)PMUSRAM_BASE;
	sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
	sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
		    (uint32_t *)sram_src;
	u32_align_cpy(sram_dst, sram_src, sram_size);

	psram_sleep_cfg->sp = PSRAM_DT_BASE;
}

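/*
 * Work out how a core was powered down, by looking at its power-domain bit in
 * PMU_PWRDN_CON and the core_pm_en bit of its automatic power-management
 * control register.
 */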
static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	uint32_t pd_reg, apm_reg;

	pd_reg = mmio_read_32(PMU_BASE + PMU_PWRDN_CON) & BIT(cpu_id);
	apm_reg = mmio_read_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id)) &
		  BIT(core_pm_en);

	if (pd_reg && !apm_reg)
		return core_pwr_pd;
	else if (!pd_reg && apm_reg)
		return core_pwr_wfi;

	ERROR("%s: 0x%x, 0x%x\n", __func__, pd_reg, apm_reg);
	while (1)
		;
}

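/*
 * Power a core back on. A core that was taken down through its power domain
 * has the domain cycled off and on with automatic power management disabled;
 * a core parked by the automatic power mode is simply given a software
 * wakeup.
 */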
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cpu_pd, cfg_info;

	cpu_pd = PD_CPU0 + cpu_id;
	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      CORES_PM_DISABLE);

		/* if the core is already on, power it off first */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
				      CORES_PM_DISABLE);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}
		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not off!\n", __func__, cpu_id);
			return -EINVAL;
		}

		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
	}

	return 0;
}

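/*
 * Power a core down. With core_pwr_pd the core must already be in wfi/wfe and
 * its power domain is switched off directly; otherwise the automatic
 * power-management control is armed so the core is taken down on its next
 * wfi, optionally (core_pwr_wfi_int) with interrupt wakeup enabled.
 */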
static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd, core_pm_value;

	cpu_pd = PD_CPU0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      CORES_PM_DISABLE);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		core_pm_value = BIT(core_pm_en) | BIT(core_pm_dis_int);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);

		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      core_pm_value);
	}

	return 0;
}

static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, cpu;

	/* turn off the non-boot cpus */
	boot_cpu = plat_my_core_pos();
	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
		if (cpu == boot_cpu)
			continue;
		cpus_power_domain_off(cpu, core_pwr_pd);
	}
}

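/*
 * PSCI core power hooks called from the common Rockchip power glue.
 * cpuson_flags[] and cpuson_entry_point[] are picked up by the warm-boot
 * entry code that was copied into PMU SRAM.
 */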
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return 0;
}

int rockchip_soc_cores_pwr_dm_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return 0;
}

int rockchip_soc_cores_pwr_dm_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = (uintptr_t)plat_get_sec_entrypoint();
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return 0;
}

int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}

int rockchip_soc_cores_pwr_dm_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}

void __dead2 rockchip_soc_soft_reset(void)
{
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(CPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(GPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(NPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(APLL_ID));
	dsb();

	mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE);
	dsb();
	/*
	 * The HW may need some time to reset the system, so do not let the
	 * core execute any further code in the meantime.
	 */
	while (1)
		;
}

/*
 * For the RK805 PMIC, the sleep pin is connected to gpio2_d2 of the rk3328.
 * If the PMIC is configured to respond to the sleep pin by powering off,
 * driving the pin high powers the PMIC off.
 */
void __dead2 rockchip_soc_system_off(void)
{
	uint32_t val;

	/* select the gpio function for the pin */
	val = mmio_read_32(GRF_BASE + GRF_GPIO2D_IOMUX);
	val &= ~GPIO2_D2_GPIO_MODE;
	mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, val);

	/* configure the pin as an output */
	val = mmio_read_32(GPIO2_BASE + SWPORTA_DDR);
	val |= GPIO2_D2;
	mmio_write_32(GPIO2_BASE + SWPORTA_DDR, val);

	/* drive the output high */
	val = mmio_read_32(GPIO2_BASE);
	val |= GPIO2_D2;
	mmio_write_32(GPIO2_BASE, val);
	dsb();

	while (1)
		;
}

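/*
 * Per CRU_CLKGATE_CON register masks of the clock gates that must be left
 * untouched while the system is suspended; clks_gating_suspend() gates every
 * other clock.
 */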
static uint32_t clk_ungt_msk[CRU_CLKGATE_NUMS] = {
	0x187f, 0x0000, 0x010c, 0x0000, 0x0200,
	0x0010, 0x0000, 0x0017, 0x001f, 0x0000,
	0x0000, 0x0000, 0x0000, 0x0003, 0x0000,
	0xf001, 0x27c0, 0x04d9, 0x03ff, 0x0000,
	0x0000, 0x0000, 0x0010, 0x0000, 0x0000,
	0x0000, 0x0000, 0x0003, 0x0008
};

static void clks_gating_suspend(uint32_t *ungt_msk)
{
	int i;

	for (i = 0; i < CRU_CLKGATE_NUMS; i++) {
		ddr_data.clk_ungt_save[i] =
			mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(i));
		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
			      ((~ungt_msk[i]) << 16) | 0xffff);
	}
}

static void clks_gating_resume(void)
{
	int i;

	for (i = 0; i < CRU_CLKGATE_NUMS; i++)
		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
			      ddr_data.clk_ungt_save[i] | 0xffff0000);
}

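/*
 * Poll the PLL lock bit, bounded by PLL_LOCKED_TIMEOUT iterations, and log an
 * error if the PLL never locks.
 */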
static inline void pm_pll_wait_lock(uint32_t pll_id)
{
	uint32_t delay = PLL_LOCKED_TIMEOUT;

	while (delay > 0) {
		if (mmio_read_32(CRU_BASE + PLL_CONS(pll_id, 1)) &
		    PLL_IS_LOCKED)
			break;
		delay--;
	}
	if (delay == 0)
		ERROR("lock-pll: %d\n", pll_id);
}

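/*
 * Put the PLL power-down control under software control (bit 15), then assert
 * or release its power-down bit (bit 14).
 */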
static inline void pll_pwr_dwn(uint32_t pll_id, uint32_t pd)
{
	mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
		      BITS_WITH_WMASK(1, 1, 15));
	if (pd)
		mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
			      BITS_WITH_WMASK(1, 1, 14));
	else
		mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
			      BITS_WITH_WMASK(0, 1, 14));
}

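/*
 * The DPLL clocks the DDR, so it is saved/powered down and later restored by
 * SRAM-resident code: these routines run while the DRAM is in self-refresh.
 */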
static __sramfunc void dpll_suspend(void)
{
	int i;

	/* slow mode */
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(DPLL_ID));

	/* save pll con */
	for (i = 0; i < CRU_PLL_CON_NUMS; i++)
		sram_data.dpll_con_save[i] =
			mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, i));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1, 1, 15));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1, 1, 14));
}

static __sramfunc void dpll_resume(void)
{
	uint32_t delay = PLL_LOCKED_TIMEOUT;

	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1, 1, 15));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(0, 1, 14));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      sram_data.dpll_con_save[1] | 0xc0000000);

	dsb();

	while (delay > 0) {
		if (mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, 1)) &
		    PLL_IS_LOCKED)
			break;
		delay--;
	}
	if (delay == 0)
		while (1)
			;

	mmio_write_32(CRU_BASE + CRU_CRU_MODE,
		      PLL_NORM_MODE(DPLL_ID));
}

static inline void pll_suspend(uint32_t pll_id)
{
	int i;

	/* slow mode */
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(pll_id));

	/* save pll con */
	for (i = 0; i < CRU_PLL_CON_NUMS; i++)
		ddr_data.cru_plls_con_save[pll_id][i] =
			mmio_read_32(CRU_BASE + PLL_CONS(pll_id, i));

	/* powerdown pll */
	pll_pwr_dwn(pll_id, pmu_pd_off);
}

static inline void pll_resume(uint32_t pll_id)
{
	mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
		      ddr_data.cru_plls_con_save[pll_id][1] | 0xc0000000);

	pm_pll_wait_lock(pll_id);

	if (PLL_IS_NORM_MODE(ddr_data.cru_mode_save, pll_id))
		mmio_write_32(CRU_BASE + CRU_CRU_MODE,
			      PLL_NORM_MODE(pll_id));
}

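/*
 * Save the CRU mode and the clock-select registers touched below, power down
 * APLL/GPLL/CPLL/NPLL and switch the remaining clock users to safe dividers
 * and parents for the duration of suspend.
 */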
static void pm_plls_suspend(void)
{
	ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_CRU_MODE);
	ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(0));
	ddr_data.clk_sel1 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(1));
	ddr_data.clk_sel18 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(18));
	ddr_data.clk_sel20 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(20));
	ddr_data.clk_sel24 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(24));
	ddr_data.clk_sel38 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(38));
	pll_suspend(NPLL_ID);
	pll_suspend(CPLL_ID);
	pll_suspend(GPLL_ID);
	pll_suspend(APLL_ID);

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
		      BITS_WITH_WMASK(0, 0x1f, 0));

	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
		      BITS_WITH_WMASK(0, 0xf, 0));

	/* crypto */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
		      BITS_WITH_WMASK(0, 0x1f, 0));

	/* pwm0 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
		      BITS_WITH_WMASK(0, 0x7f, 8));

	/* uart2 from 24M */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
		      BITS_WITH_WMASK(2, 0x3, 8));

	/* clk_rtc32k */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
		      BITS_WITH_WMASK(767, 0x3fff, 0) |
		      BITS_WITH_WMASK(2, 0x3, 14));
}

static void pm_plls_resume(void)
{
	/* clk_rtc32k */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
		      ddr_data.clk_sel38 |
		      BITS_WMSK(0x3fff, 0) |
		      BITS_WMSK(0x3, 14));

	/* uart2 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
		      ddr_data.clk_sel18 | BITS_WMSK(0x3, 8));

	/* pwm0 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
		      ddr_data.clk_sel24 | BITS_WMSK(0x7f, 8));

	/* crypto */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
		      ddr_data.clk_sel20 | BITS_WMSK(0x1f, 0));

	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
		      ddr_data.clk_sel1 | BITS_WMSK(0xf, 0));

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
		      ddr_data.clk_sel0 | BITS_WMSK(0x1f, 0));

	pll_pwr_dwn(APLL_ID, pmu_pd_on);
	pll_pwr_dwn(GPLL_ID, pmu_pd_on);
	pll_pwr_dwn(CPLL_ID, pmu_pd_on);
	pll_pwr_dwn(NPLL_ID, pmu_pd_on);

	pll_resume(APLL_ID);
	pll_resume(GPLL_ID);
	pll_resume(CPLL_ID);
	pll_resume(NPLL_ID);
}

#define ARCH_TIMER_TICKS_PER_US (SYS_COUNTER_FREQ_IN_TICKS / 1000000)

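/*
 * Busy-wait for roughly 'us' microseconds using the architectural counter;
 * safe to call from SRAM-resident code.
 */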
static __sramfunc void sram_udelay(uint32_t us)
{
	uint64_t pct_orig, pct_now;
	uint64_t to_wait = ARCH_TIMER_TICKS_PER_US * us;

	isb();
	pct_orig = read_cntpct_el0();

	do {
		isb();
		pct_now = read_cntpct_el0();
	} while ((pct_now - pct_orig) <= to_wait);
}

/*
 * For the RK805 PMIC, the sleep pin is connected to gpio2_d2 of the rk3328.
 * If the PMIC is configured to respond to the sleep pin by entering sleep
 * mode, driving the pin high puts the PMIC to sleep.
 */
__sramfunc void rk3328_pmic_suspend(void)
{
	sram_data.pmic_sleep_save = mmio_read_32(GRF_BASE + PMIC_SLEEP_REG);
	sram_data.pmic_sleep_gpio_save[1] = mmio_read_32(GPIO2_BASE + 4);
	sram_data.pmic_sleep_gpio_save[0] = mmio_read_32(GPIO2_BASE);
	mmio_write_32(GRF_BASE + PMIC_SLEEP_REG, BITS_WITH_WMASK(0, 0x3, 4));
	mmio_write_32(GPIO2_BASE + 4,
		      sram_data.pmic_sleep_gpio_save[1] | BIT(26));
	mmio_write_32(GPIO2_BASE,
		      sram_data.pmic_sleep_gpio_save[0] | BIT(26));
}

__sramfunc void rk3328_pmic_resume(void)
{
	mmio_write_32(GPIO2_BASE, sram_data.pmic_sleep_gpio_save[0]);
	mmio_write_32(GPIO2_BASE + 4, sram_data.pmic_sleep_gpio_save[1]);
	mmio_write_32(GRF_BASE + PMIC_SLEEP_REG,
		      sram_data.pmic_sleep_save | BITS_WMSK(0xffff, 0));
	/* ramping the voltage back up takes a while */
	sram_udelay(100);
}

static inline void rockchip_set_sram_sp(uint64_t set_sp)
{
	__asm volatile("mov sp, %0\n"::"r" (set_sp) : "sp");
}

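/*
 * Put the DDR into self-refresh and retention under PMU control, gate the
 * DDR clocks and power down the DPLL. Runs from SRAM because the DRAM
 * contents are unreachable once self-refresh is entered.
 */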
static __sramfunc void ddr_suspend(void)
{
	sram_data.pd_sr_idle_save = mmio_read_32(DDR_UPCTL_BASE +
						 DDR_PCTL2_PWRCTL);
	sram_data.pd_sr_idle_save &= SELFREF_EN;

	mmio_clrbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL, SELFREF_EN);
	sram_data.ddr_grf_con0 = mmio_read_32(DDR_GRF_BASE +
					      DDRGRF_SOC_CON(0));
	mmio_write_32(DDR_GRF_BASE, BIT_WITH_WMSK(14) | WMSK_BIT(15));

	/*
	 * Override csysreq from the ddrc and send a valid csysreq signal to
	 * the PMU; by default csysreq is controlled by the ddrc only.
	 */

	/* enter self-refresh */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
	while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
		(0x03 << 12)) != (0x02 << 12))
		;
	/* ddr retention */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));

	/* gate the ddr clocks */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
		      BITS_WITH_WMASK(0x7, 0x7, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
		      BITS_WITH_WMASK(1, 1, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
		      BITS_WITH_WMASK(0x1ff, 0x1ff, 1));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
		      BITS_WITH_WMASK(0x3, 0x3, 0));

	dpll_suspend();
}

static __sramfunc void ddr_resume(void)
{
	dpll_resume();

	/* ungate the ddr clocks */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
		      BITS_WITH_WMASK(0, 0x7, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
		      BITS_WITH_WMASK(0, 1, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
		      BITS_WITH_WMASK(0, 0x1ff, 1));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
		      BITS_WITH_WMASK(0, 0x3, 0));

	/* exit ddr retention */
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));
	/* exit self-refresh */
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
	while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
		(0x03 << 12)) != (0x00 << 12))
		;

	mmio_write_32(DDR_GRF_BASE, sram_data.ddr_grf_con0 | 0xc0000000);
	if (sram_data.pd_sr_idle_save)
		mmio_setbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL,
				SELFREF_EN);
}

static __sramfunc void sram_dbg_uart_suspend(void)
{
	sram_data.uart2_ier = mmio_read_32(UART2_BASE + UART_IER);
	mmio_write_32(UART2_BASE + UART_IER, UART_INT_DISABLE);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20002000);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040004);
}

static __sramfunc void sram_dbg_uart_resume(void)
{
	/* restore uart clk and reset fifo */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20000000);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040000);
	mmio_write_32(UART2_BASE + UART_FCR, UART_FIFO_RESET);
	mmio_write_32(UART2_BASE + UART_IER, sram_data.uart2_ier);
}

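/*
 * Arm automatic power management for the boot core and execute wfi: the SoC
 * is expected to power down here and come back through the warm-boot path,
 * so this function never returns.
 */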
static __sramfunc void sram_soc_enter_lp(void)
{
	uint32_t apm_value;

	apm_value = BIT(core_pm_en) |
		    BIT(core_pm_dis_int) |
		    BIT(core_pm_int_wakeup_en);
	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(PD_CPU0), apm_value);

	dsb();
	isb();
err_loop:
	wfi();
	/*
	 * The SoC will enter low power mode here and
	 * should never return to this point.
	 */
	goto err_loop;
}

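/*
 * SRAM-resident suspend path: runs with the MMU and I-cache off, redirects
 * the warm-boot address to PMU SRAM, puts the DDR into self-refresh, tells
 * the PMIC to sleep, quiesces the debug UART and finally enters the
 * low-power wfi loop.
 */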
__sramfunc void sram_suspend(void)
{
	/* disable mmu and icache */
	tlbialle3();
	disable_mmu_icache_el3();

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	/* ddr self-refresh and gating phy */
	ddr_suspend();

	rk3328_pmic_suspend();

	sram_dbg_uart_suspend();

	sram_soc_enter_lp();
}

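/*
 * First code run on system resume, referenced through
 * psram_sleep_cfg->ddr_func: it undoes sram_suspend() in reverse order and
 * restores the normal warm-boot address.
 */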
static __sramfunc void sys_resume_first(void)
{
	sram_dbg_uart_resume();

	rk3328_pmic_resume();

	/* exit ddr self-refresh */
	ddr_resume();

	/* disable apm cfg */
	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(0), CORES_PM_DISABLE);

	/* restore the warm boot address of the cpus */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
}

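/*
 * Last step of system suspend: move the stack pointer into PMU SRAM and run
 * the SRAM-resident suspend sequence; execution should never come back here.
 */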
void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
{
	rockchip_set_sram_sp(PSRAM_DT_BASE);

	sram_suspend();

	/* should never reach here */
	psci_power_down_wfi();
}

int rockchip_soc_sys_pwr_dm_suspend(void)
{
	clks_gating_suspend(clk_ungt_msk);

	pm_plls_suspend();

	return 0;
}

int rockchip_soc_sys_pwr_dm_resume(void)
{
	pm_plls_resume();

	clks_gating_resume();

	plat_rockchip_gic_cpuif_enable();

	return 0;
}

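/*
 * Boot-time PMU setup: clear the cpuson flags, publish the warm-boot entry
 * and the resume hook in the PMU SRAM data block, program the warm-boot
 * address and power off the non-boot cpus.
 */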
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;
	psram_sleep_cfg->ddr_func = (uint64_t)sys_resume_first;
	psram_sleep_cfg->ddr_data = 0x00;
	psram_sleep_cfg->ddr_flag = 0x01;
	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	/* program the warm boot address of the cpus */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	nonboot_cpus_off();

	INFO("%s: pd status 0x%x\n",
	     __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}