/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <drivers/delay_timer.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <plat/common/platform.h>

#include <plat_private.h>
#include <pmu.h>
#include <pmu_com.h>
#include <rk3328_def.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static struct rk3328_sleep_ddr_data ddr_data;
static __sramdata struct rk3328_sleep_sram_data sram_data;

static uint32_t cpu_warm_boot_addr;

#pragma weak rk3328_pmic_suspend
#pragma weak rk3328_pmic_resume

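/*
 * Report how a core is currently set up to be powered down: through its
 * power domain (core_pwr_pd) or through the automatic power-management
 * logic triggered by wfi (core_pwr_wfi).
 */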
static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	uint32_t pd_reg, apm_reg;

	pd_reg = mmio_read_32(PMU_BASE + PMU_PWRDN_CON) & BIT(cpu_id);
	apm_reg = mmio_read_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id)) &
		  BIT(core_pm_en);

	if (pd_reg && !apm_reg)
		return core_pwr_pd;
	else if (!pd_reg && apm_reg)
		return core_pwr_wfi;

	ERROR("%s: 0x%x, 0x%x\n", __func__, pd_reg, apm_reg);
	while (1)
		;
}

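/*
 * Power a core on, either by toggling its power domain directly
 * (core_pwr_pd) or by using the APM software wakeup request.
 */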
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cpu_pd, cfg_info;

	cpu_pd = PD_CPU0 + cpu_id;
	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      CORES_PM_DISABLE);

		/* if the core is already on, power it off first */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
				      CORES_PM_DISABLE);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}
		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not off!\n", __func__, cpu_id);
			return -EINVAL;
		}

		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
	}

	return 0;
}

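/*
 * Power a core down. With core_pwr_pd the power domain is switched off
 * immediately; otherwise the automatic power-management logic is armed so
 * the core powers down on its next wfi, optionally with interrupt wakeup
 * enabled (core_pwr_wfi_int).
 */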
static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd, core_pm_value;

	cpu_pd = PD_CPU0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      CORES_PM_DISABLE);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		core_pm_value = BIT(core_pm_en) | BIT(core_pm_dis_int);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);

		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      core_pm_value);
	}

	return 0;
}

static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, cpu;

	/* turn off non-boot cpus */
	boot_cpu = plat_my_core_pos();
	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
		if (cpu == boot_cpu)
			continue;
		cpus_power_domain_off(cpu, core_pwr_pd);
	}
}

void sram_save(void)
{
	/* TODO: support the sram save for rk3328 SoCs */
}

void sram_restore(void)
{
	/* TODO: support the sram restore for rk3328 SoCs */
}

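/*
 * Power on the core identified by mpidr and have it enter the given
 * entry point once it comes out of warm boot.
 */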
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return 0;
}

int rockchip_soc_cores_pwr_dm_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return 0;
}

int rockchip_soc_cores_pwr_dm_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = (uintptr_t)plat_get_sec_entrypoint();
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return 0;
}

int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}

int rockchip_soc_cores_pwr_dm_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}

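/*
 * Drop all PLLs into slow (bypass) mode, then trigger a first global
 * software reset through the CRU.
 */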
void __dead2 rockchip_soc_soft_reset(void)
{
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(CPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(GPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(NPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(APLL_ID));
	dsb();

	mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE);
	dsb();
	/*
	 * The HW may need some time to reset the system,
	 * so do not let the core execute any further code.
	 */
	while (1)
		;
}

/*
 * The sleep pin of the RK805 PMIC is connected to gpio2_d2 of the rk3328.
 * If the PMIC is configured to power off in response to the sleep pin,
 * driving the pin high powers the PMIC off.
 */
void __dead2 rockchip_soc_system_off(void)
{
	uint32_t val;

	/* gpio config */
	val = mmio_read_32(GRF_BASE + GRF_GPIO2D_IOMUX);
	val &= ~GPIO2_D2_GPIO_MODE;
	mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, val);

	/* config output */
	val = mmio_read_32(GPIO2_BASE + SWPORTA_DDR);
	val |= GPIO2_D2;
	mmio_write_32(GPIO2_BASE + SWPORTA_DDR, val);

	/* config output high level */
	val = mmio_read_32(GPIO2_BASE);
	val |= GPIO2_D2;
	mmio_write_32(GPIO2_BASE, val);
	dsb();

	while (1)
		;
}

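/*
 * Per-register masks of the clocks in each CRU_CLKGATE_CON that must stay
 * ungated during system suspend; every other clock is gated by
 * clks_gating_suspend().
 */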
static uint32_t clk_ungt_msk[CRU_CLKGATE_NUMS] = {
	0x187f, 0x0000, 0x010c, 0x0000, 0x0200,
	0x0010, 0x0000, 0x0017, 0x001f, 0x0000,
	0x0000, 0x0000, 0x0000, 0x0003, 0x0000,
	0xf001, 0x27c0, 0x04D9, 0x03ff, 0x0000,
	0x0000, 0x0000, 0x0010, 0x0000, 0x0000,
	0x0000, 0x0000, 0x0003, 0x0008
};

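/*
 * Save the current clock-gate state and gate every clock that is not
 * flagged in ungt_msk; clks_gating_resume() restores the saved state.
 */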
static void clks_gating_suspend(uint32_t *ungt_msk)
{
	int i;

	for (i = 0; i < CRU_CLKGATE_NUMS; i++) {
		ddr_data.clk_ungt_save[i] =
			mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(i));
		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
			      ((~ungt_msk[i]) << 16) | 0xffff);
	}
}

static void clks_gating_resume(void)
{
	int i;

	for (i = 0; i < CRU_CLKGATE_NUMS; i++)
		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
			      ddr_data.clk_ungt_save[i] | 0xffff0000);
}

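/* Busy-wait, with a bounded retry count, until the PLL reports lock. */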
static inline void pm_pll_wait_lock(uint32_t pll_id)
{
	uint32_t delay = PLL_LOCKED_TIMEOUT;

	while (delay > 0) {
		if (mmio_read_32(CRU_BASE + PLL_CONS(pll_id, 1)) &
		    PLL_IS_LOCKED)
			break;
		delay--;
	}
	if (delay == 0)
		ERROR("lock-pll: %d\n", pll_id);
}

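/*
 * Drive the power-down controls in PLL_CONS(pll_id, 1): bit 15 is set
 * first, then bit 14 requests power-down (pd != 0) or power-up.
 */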
static inline void pll_pwr_dwn(uint32_t pll_id, uint32_t pd)
{
	mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
		      BITS_WITH_WMASK(1U, 1U, 15));
	if (pd)
		mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
			      BITS_WITH_WMASK(1, 1, 14));
	else
		mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
			      BITS_WITH_WMASK(0, 1, 14));
}

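/*
 * The DPLL clocks the DDR subsystem, so its suspend/resume helpers run
 * from SRAM (__sramfunc) while DDR itself is in self-refresh.
 */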
static __sramfunc void dpll_suspend(void)
{
	int i;

	/* slow mode */
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(DPLL_ID));

	/* save pll con */
	for (i = 0; i < CRU_PLL_CON_NUMS; i++)
		sram_data.dpll_con_save[i] =
			mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, i));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1U, 1U, 15));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1, 1, 14));
}

static __sramfunc void dpll_resume(void)
{
	uint32_t delay = PLL_LOCKED_TIMEOUT;

	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1U, 1U, 15));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(0, 1, 14));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      sram_data.dpll_con_save[1] | 0xc0000000);

	dsb();

	while (delay > 0) {
		if (mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, 1)) &
		    PLL_IS_LOCKED)
			break;
		delay--;
	}
	if (delay == 0)
		while (1)
			;

	mmio_write_32(CRU_BASE + CRU_CRU_MODE,
		      PLL_NORM_MODE(DPLL_ID));
}

static inline void pll_suspend(uint32_t pll_id)
{
	int i;

	/* slow mode */
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(pll_id));

	/* save pll con */
	for (i = 0; i < CRU_PLL_CON_NUMS; i++)
		ddr_data.cru_plls_con_save[pll_id][i] =
			mmio_read_32(CRU_BASE + PLL_CONS(pll_id, i));

	/* powerdown pll */
	pll_pwr_dwn(pll_id, pmu_pd_off);
}

static inline void pll_resume(uint32_t pll_id)
{
	mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
		      ddr_data.cru_plls_con_save[pll_id][1] | 0xc0000000);

	pm_pll_wait_lock(pll_id);

	if (PLL_IS_NORM_MODE(ddr_data.cru_mode_save, pll_id))
		mmio_write_32(CRU_BASE + CRU_CRU_MODE,
			      PLL_NORM_MODE(pll_id));
}

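/*
 * Save the CRU mode and the clock-select registers touched below, power
 * down every PLL except the DPLL and drop the remaining clocks onto their
 * slowest parents for suspend; pm_plls_resume() undoes this in reverse.
 */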
static void pm_plls_suspend(void)
{
	ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_CRU_MODE);
	ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(0));
	ddr_data.clk_sel1 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(1));
	ddr_data.clk_sel18 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(18));
	ddr_data.clk_sel20 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(20));
	ddr_data.clk_sel24 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(24));
	ddr_data.clk_sel38 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(38));
	pll_suspend(NPLL_ID);
	pll_suspend(CPLL_ID);
	pll_suspend(GPLL_ID);
	pll_suspend(APLL_ID);

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
		      BITS_WITH_WMASK(0, 0x1f, 0));

	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
		      BITS_WITH_WMASK(0, 0xf, 0));

	/* crypto */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
		      BITS_WITH_WMASK(0, 0x1f, 0));

	/* pwm0 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
		      BITS_WITH_WMASK(0, 0x7f, 8));

	/* uart2 from 24M */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
		      BITS_WITH_WMASK(2, 0x3, 8));

	/* clk_rtc32k */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
		      BITS_WITH_WMASK(767, 0x3fff, 0) |
		      BITS_WITH_WMASK(2U, 0x3u, 14));
}

static void pm_plls_resume(void)
{
	/* clk_rtc32k */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
		      ddr_data.clk_sel38 |
		      BITS_WMSK(0x3fff, 0) |
		      BITS_WMSK(0x3u, 14));

	/* uart2 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
		      ddr_data.clk_sel18 | BITS_WMSK(0x3, 8));

	/* pwm0 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
		      ddr_data.clk_sel24 | BITS_WMSK(0x7f, 8));

	/* crypto */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
		      ddr_data.clk_sel20 | BITS_WMSK(0x1f, 0));

	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
		      ddr_data.clk_sel1 | BITS_WMSK(0xf, 0));

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
		      ddr_data.clk_sel0 | BITS_WMSK(0x1f, 0));

	pll_pwr_dwn(APLL_ID, pmu_pd_on);
	pll_pwr_dwn(GPLL_ID, pmu_pd_on);
	pll_pwr_dwn(CPLL_ID, pmu_pd_on);
	pll_pwr_dwn(NPLL_ID, pmu_pd_on);

	pll_resume(APLL_ID);
	pll_resume(GPLL_ID);
	pll_resume(CPLL_ID);
	pll_resume(NPLL_ID);
}

#define ARCH_TIMER_TICKS_PER_US	(SYS_COUNTER_FREQ_IN_TICKS / 1000000)

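/*
 * Busy-wait delay running from SRAM and based on the generic timer
 * counter, usable while DDR is in self-refresh.
 */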
static __sramfunc void sram_udelay(uint32_t us)
{
	uint64_t pct_orig, pct_now;
	uint64_t to_wait = ARCH_TIMER_TICKS_PER_US * us;

	isb();
	pct_orig = read_cntpct_el0();

	do {
		isb();
		pct_now = read_cntpct_el0();
	} while ((pct_now - pct_orig) <= to_wait);
}

/*
 * The sleep pin of the RK805 PMIC is connected to gpio2_d2 of the rk3328.
 * If the PMIC is configured to enter sleep mode in response to the sleep
 * pin, driving the pin high puts the PMIC to sleep.
 */
__sramfunc void rk3328_pmic_suspend(void)
{
	sram_data.pmic_sleep_save = mmio_read_32(GRF_BASE + PMIC_SLEEP_REG);
	sram_data.pmic_sleep_gpio_save[1] = mmio_read_32(GPIO2_BASE + 4);
	sram_data.pmic_sleep_gpio_save[0] = mmio_read_32(GPIO2_BASE);
	mmio_write_32(GRF_BASE + PMIC_SLEEP_REG, BITS_WITH_WMASK(0, 0x3, 4));
	mmio_write_32(GPIO2_BASE + 4,
		      sram_data.pmic_sleep_gpio_save[1] | BIT(26));
	mmio_write_32(GPIO2_BASE,
		      sram_data.pmic_sleep_gpio_save[0] | BIT(26));
}

__sramfunc void rk3328_pmic_resume(void)
{
	mmio_write_32(GPIO2_BASE, sram_data.pmic_sleep_gpio_save[0]);
	mmio_write_32(GPIO2_BASE + 4, sram_data.pmic_sleep_gpio_save[1]);
	mmio_write_32(GRF_BASE + PMIC_SLEEP_REG,
		      sram_data.pmic_sleep_save | BITS_WMSK(0xffffu, 0));
	/* Restoring the voltage takes a while */
	sram_udelay(100);
}

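/*
 * Put the DDR controller into self-refresh, enable DDR retention, gate the
 * DDR clocks and power down the DPLL. Must run from SRAM because DDR is
 * inaccessible afterwards.
 */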
static __sramfunc void ddr_suspend(void)
{
	sram_data.pd_sr_idle_save = mmio_read_32(DDR_UPCTL_BASE +
						 DDR_PCTL2_PWRCTL);
	sram_data.pd_sr_idle_save &= SELFREF_EN;

	mmio_clrbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL, SELFREF_EN);
	sram_data.ddr_grf_con0 = mmio_read_32(DDR_GRF_BASE +
					      DDRGRF_SOC_CON(0));
	mmio_write_32(DDR_GRF_BASE, BIT_WITH_WMSK(14) | WMSK_BIT(15));

	/*
	 * Override csysreq from the ddrc and send a valid csysreq signal
	 * to the PMU; csysreq is normally controlled by the ddrc only.
	 */

	/* in self-refresh */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
	while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
		(0x03 << 12)) != (0x02 << 12))
		;
	/* ddr retention */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));

	/* ddr gating */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
		      BITS_WITH_WMASK(0x7, 0x7, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
		      BITS_WITH_WMASK(1, 1, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
		      BITS_WITH_WMASK(0x1ff, 0x1ff, 1));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
		      BITS_WITH_WMASK(0x3, 0x3, 0));

	dpll_suspend();
}

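/*
 * Reverse ddr_suspend(): bring the DPLL back up, ungate the DDR clocks,
 * release retention and take the controller out of self-refresh.
 */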
__sramfunc void dmc_restore(void)
{
	dpll_resume();

	/* ddr ungating */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
		      BITS_WITH_WMASK(0, 0x7, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
		      BITS_WITH_WMASK(0, 1, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
		      BITS_WITH_WMASK(0, 0x1ff, 1));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
		      BITS_WITH_WMASK(0, 0x3, 0));

	/* ddr de_retention */
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));
	/* exit self-refresh */
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
	while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
		(0x03 << 12)) != (0x00 << 12))
		;

	mmio_write_32(DDR_GRF_BASE, sram_data.ddr_grf_con0 | 0xc0000000);
	if (sram_data.pd_sr_idle_save)
		mmio_setbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL,
				SELFREF_EN);
}

static __sramfunc void sram_dbg_uart_suspend(void)
{
	sram_data.uart2_ier = mmio_read_32(UART2_BASE + UART_IER);
	mmio_write_32(UART2_BASE + UART_IER, UART_INT_DISABLE);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20002000);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040004);
}

__sramfunc void sram_dbg_uart_resume(void)
{
	/* restore uart clk and reset fifo */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20000000);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040000);
	mmio_write_32(UART2_BASE + UART_FCR, UART_FIFO_RESET);
	mmio_write_32(UART2_BASE + UART_IER, sram_data.uart2_ier);
}

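/*
 * Arm the CPU0 automatic power-management logic and execute wfi so the SoC
 * drops into its low-power state; execution never returns from here.
 */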
static __sramfunc void sram_soc_enter_lp(void)
{
	uint32_t apm_value;

	apm_value = BIT(core_pm_en) |
		    BIT(core_pm_dis_int) |
		    BIT(core_pm_int_wakeup_en);
	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(PD_CPU0), apm_value);

	dsb();
	isb();
err_loop:
	wfi();
	/*
	 * The SoC will enter low power mode and
	 * will not return here.
	 */
	goto err_loop;
}

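/*
 * Final suspend path, executed from SRAM: turn off the EL3 MMU and caches,
 * point the warm-boot address at pmu_cpuson_entrypoint, put DDR into
 * self-refresh, tell the PMIC to sleep, quiesce the debug UART and enter
 * the low-power wfi loop.
 */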
__sramfunc void sram_suspend(void)
{
	/* disable mmu and icache, and invalidate the EL3 TLBs */
	disable_mmu_icache_el3();
	tlbialle3();
	dsbsy();
	isb();

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      ((uintptr_t)&pmu_cpuson_entrypoint >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	/* ddr self-refresh and gating phy */
	ddr_suspend();

	rk3328_pmic_suspend();

	sram_dbg_uart_suspend();

	sram_soc_enter_lp();
}

void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
{
	sram_suspend();

	/* should never reach here */
	psci_power_down_wfi();
}

int rockchip_soc_sys_pwr_dm_suspend(void)
{
	clks_gating_suspend(clk_ungt_msk);

	pm_plls_suspend();

	return 0;
}

int rockchip_soc_sys_pwr_dm_resume(void)
{
	pm_plls_resume();

	clks_gating_resume();

	plat_rockchip_gic_cpuif_enable();

	return 0;
}

void rockchip_plat_mmu_el3(void)
{
	/* TODO: support the EL3 MMU for rk3328 SoCs */
}

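/*
 * PMU setup at BL31 init: program the warm-boot entry address for all
 * cores and make sure only the boot core is left powered on.
 */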
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	/* the warm booting address of cpus */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	nonboot_cpus_off();

	INFO("%s: pd status 0x%x\n",
	     __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}