/*
 * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <drivers/delay_timer.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <plat/common/platform.h>

#include <cpus_on_fixed_addr.h>
#include <plat_private.h>
#include <pmu.h>
#include <px30_def.h>
#include <secure.h>
#include <soc.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);
#define rockchip_pd_lock_init()	bakery_lock_init(&rockchip_pd_lock)
#define rockchip_pd_lock_get()	bakery_lock_get(&rockchip_pd_lock)
#define rockchip_pd_lock_rls()	bakery_lock_release(&rockchip_pd_lock)

static struct psram_data_t *psram_boot_cfg =
	(struct psram_data_t *)&sys_sleep_flag_sram;

/*
 * There are two ways to power a core on or off:
 * 1) Switch its power domain on or off through the PMU_PWRDN_CON register;
 *    this is core_pwr_pd mode.
 * 2) Enable core power management in the PMU_CORE_PM_CON register; the
 *    core's power domain is then powered off automatically once the core
 *    enters wfi. This is core_pwr_wfi or core_pwr_wfi_int mode.
 * core_pm_cfg_info records which method is currently in use for each core.
 */

static uint32_t cores_pd_cfg_info[PLATFORM_CORE_COUNT]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
;
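
/*
 * Illustrative note, not functional code: a core taken down with
 * cpus_power_domain_off(cpu, core_pwr_pd) is brought back by toggling its
 * domain in PMU_PWRDN_CON, while a core parked in core_pwr_wfi(_int) mode
 * only needs the soft-wakeup bit in its PMU_CPUAPM_CON register; see
 * cpus_power_domain_on() below.
 */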

struct px30_sleep_ddr_data {
	uint32_t clk_sel0;
	uint32_t cru_mode_save;
	uint32_t cru_pmu_mode_save;
	uint32_t ddrc_hwlpctl;
	uint32_t ddrc_pwrctrl;
	uint32_t ddrgrf_con0;
	uint32_t ddrgrf_con1;
	uint32_t ddrstdby_con0;
	uint32_t gpio0b_iomux;
	uint32_t gpio0c_iomux;
	uint32_t pmu_pwrmd_core_l;
	uint32_t pmu_pwrmd_core_h;
	uint32_t pmu_pwrmd_cmm_l;
	uint32_t pmu_pwrmd_cmm_h;
	uint32_t pmu_wkup_cfg2_l;
	uint32_t pmu_cru_clksel_con0;
	uint32_t pmugrf_soc_con0;
	uint32_t pmusgrf_soc_con0;
	uint32_t pmic_slp_iomux;
	uint32_t pgrf_pvtm_con[2];
	uint32_t cru_clk_gate[CRU_CLKGATES_CON_CNT];
	uint32_t cru_pmu_clk_gate[CRU_PMU_CLKGATE_CON_CNT];
	uint32_t cru_plls_con_save[END_PLL_ID][PLL_CON_CNT];
	uint32_t cpu_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t gpu_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t isp_128m_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t isp_rd_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t isp_wr_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t isp_m1_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t vip_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t rga_rd_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t rga_wr_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t vop_m0_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t vop_m1_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t vpu_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t vpu_r128_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t dcf_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t dmac_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t crypto_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t gmac_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t emmc_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t nand_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t sdio_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t sfc_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t sdmmc_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t usb_host_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t usb_otg_qos[CPU_AXI_QOS_NUM_REGS];
};

static struct px30_sleep_ddr_data ddr_data
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
;

static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	return cores_pd_cfg_info[cpu_id];
}

static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	cores_pd_cfg_info[cpu_id] = value;
#if !USE_COHERENT_MEM
	flush_dcache_range((uintptr_t)&cores_pd_cfg_info[cpu_id],
			   sizeof(uint32_t));
#endif
}

static inline uint32_t pmu_power_domain_st(uint32_t pd)
{
	return mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & BIT(pd) ?
	       pmu_pd_off :
	       pmu_pd_on;
}

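/*
 * Drive one power domain on or off through PMU_PWRDN_CON and poll
 * PMU_PWRDN_ST until the PMU acknowledges the requested state or
 * PD_CTR_LOOP microseconds have elapsed. The bakery lock serialises
 * concurrent callers.
 */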
static int pmu_power_domain_ctr(uint32_t pd, uint32_t pd_state)
{
	uint32_t loop = 0;
	int ret = 0;

	rockchip_pd_lock_get();

	mmio_write_32(PMU_BASE + PMU_PWRDN_CON,
		      BITS_WITH_WMASK(pd_state, 0x1, pd));
	dsb();

	while ((pmu_power_domain_st(pd) != pd_state) && (loop < PD_CTR_LOOP)) {
		udelay(1);
		loop++;
	}

	if (pmu_power_domain_st(pd) != pd_state) {
		WARN("%s: %d, %d, error!\n", __func__, pd, pd_state);
		ret = -EINVAL;
	}

	rockchip_pd_lock_rls();

	return ret;
}

static inline uint32_t pmu_bus_idle_st(uint32_t bus)
{
	return !!((mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & BIT(bus)) &&
		  (mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & BIT(bus + 16)));
}

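/*
 * Request (or release) the bus idle handshake for the interconnect port of
 * a power domain: both the acknowledge bit and the idle status bit in
 * PMU_BUS_IDLE_ST must reach the requested state within BUS_IDLE_LOOP
 * microseconds.
 */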
static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
{
	uint32_t wait_cnt = 0;

	mmio_write_32(PMU_BASE + PMU_BUS_IDLE_REQ,
		      BITS_WITH_WMASK(state, 0x1, bus));

	while (pmu_bus_idle_st(bus) != state &&
	       wait_cnt < BUS_IDLE_LOOP) {
		udelay(1);
		wait_cnt++;
	}

	if (pmu_bus_idle_st(bus) != state)
		WARN("%s:idle_st=0x%x, bus_id=%d\n",
		     __func__, mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST), bus);
}

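/*
 * SAVE_QOS()/RESTORE_QOS() (platform helpers) copy the CPU_AXI_QOS_NUM_REGS
 * QoS registers of the named bus master to/from the buffers in ddr_data.
 * Only masters whose power domain is currently on are touched, since the
 * registers of a powered-down domain are not accessible.
 */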
static void qos_save(void)
{
	/* the SCU power domain will power off, so save the CPU QoS settings */
	SAVE_QOS(ddr_data.cpu_qos, CPU);

	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		SAVE_QOS(ddr_data.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_VI) == pmu_pd_on) {
		SAVE_QOS(ddr_data.isp_128m_qos, ISP_128M);
		SAVE_QOS(ddr_data.isp_rd_qos, ISP_RD);
		SAVE_QOS(ddr_data.isp_wr_qos, ISP_WR);
		SAVE_QOS(ddr_data.isp_m1_qos, ISP_M1);
		SAVE_QOS(ddr_data.vip_qos, VIP);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		SAVE_QOS(ddr_data.rga_rd_qos, RGA_RD);
		SAVE_QOS(ddr_data.rga_wr_qos, RGA_WR);
		SAVE_QOS(ddr_data.vop_m0_qos, VOP_M0);
		SAVE_QOS(ddr_data.vop_m1_qos, VOP_M1);
	}
	if (pmu_power_domain_st(PD_VPU) == pmu_pd_on) {
		SAVE_QOS(ddr_data.vpu_qos, VPU);
		SAVE_QOS(ddr_data.vpu_r128_qos, VPU_R128);
	}
	if (pmu_power_domain_st(PD_MMC_NAND) == pmu_pd_on) {
		SAVE_QOS(ddr_data.emmc_qos, EMMC);
		SAVE_QOS(ddr_data.nand_qos, NAND);
		SAVE_QOS(ddr_data.sdio_qos, SDIO);
		SAVE_QOS(ddr_data.sfc_qos, SFC);
	}
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		SAVE_QOS(ddr_data.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CRYPTO) == pmu_pd_on)
		SAVE_QOS(ddr_data.crypto_qos, CRYPTO);
	if (pmu_power_domain_st(PD_SDCARD) == pmu_pd_on)
		SAVE_QOS(ddr_data.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_USB) == pmu_pd_on) {
		SAVE_QOS(ddr_data.usb_host_qos, USB_HOST);
		SAVE_QOS(ddr_data.usb_otg_qos, USB_OTG);
	}
}

static void qos_restore(void)
{
	RESTORE_QOS(ddr_data.cpu_qos, CPU);

	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		RESTORE_QOS(ddr_data.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_VI) == pmu_pd_on) {
		RESTORE_QOS(ddr_data.isp_128m_qos, ISP_128M);
		RESTORE_QOS(ddr_data.isp_rd_qos, ISP_RD);
		RESTORE_QOS(ddr_data.isp_wr_qos, ISP_WR);
		RESTORE_QOS(ddr_data.isp_m1_qos, ISP_M1);
		RESTORE_QOS(ddr_data.vip_qos, VIP);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		RESTORE_QOS(ddr_data.rga_rd_qos, RGA_RD);
		RESTORE_QOS(ddr_data.rga_wr_qos, RGA_WR);
		RESTORE_QOS(ddr_data.vop_m0_qos, VOP_M0);
		RESTORE_QOS(ddr_data.vop_m1_qos, VOP_M1);
	}
	if (pmu_power_domain_st(PD_VPU) == pmu_pd_on) {
		RESTORE_QOS(ddr_data.vpu_qos, VPU);
		RESTORE_QOS(ddr_data.vpu_r128_qos, VPU_R128);
	}
	if (pmu_power_domain_st(PD_MMC_NAND) == pmu_pd_on) {
		RESTORE_QOS(ddr_data.emmc_qos, EMMC);
		RESTORE_QOS(ddr_data.nand_qos, NAND);
		RESTORE_QOS(ddr_data.sdio_qos, SDIO);
		RESTORE_QOS(ddr_data.sfc_qos, SFC);
	}
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		RESTORE_QOS(ddr_data.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CRYPTO) == pmu_pd_on)
		RESTORE_QOS(ddr_data.crypto_qos, CRYPTO);
	if (pmu_power_domain_st(PD_SDCARD) == pmu_pd_on)
		RESTORE_QOS(ddr_data.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_USB) == pmu_pd_on) {
		RESTORE_QOS(ddr_data.usb_host_qos, USB_HOST);
		RESTORE_QOS(ddr_data.usb_otg_qos, USB_OTG);
	}
}

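/*
 * Ordering when switching a power domain: power it up before releasing the
 * bus idle request, and idle the bus before removing power.
 */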
static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
{
	uint32_t state;

	if (pmu_power_domain_st(pd_id) == pd_state)
		goto out;

	if (pd_state == pmu_pd_on)
		pmu_power_domain_ctr(pd_id, pd_state);

	state = (pd_state == pmu_pd_off) ? bus_idle : bus_active;

	switch (pd_id) {
	case PD_GPU:
		pmu_bus_idle_req(BUS_ID_GPU, state);
		break;
	case PD_VI:
		pmu_bus_idle_req(BUS_ID_VI, state);
		break;
	case PD_VO:
		pmu_bus_idle_req(BUS_ID_VO, state);
		break;
	case PD_VPU:
		pmu_bus_idle_req(BUS_ID_VPU, state);
		break;
	case PD_MMC_NAND:
		pmu_bus_idle_req(BUS_ID_MMC, state);
		break;
	case PD_GMAC:
		pmu_bus_idle_req(BUS_ID_GMAC, state);
		break;
	case PD_CRYPTO:
		pmu_bus_idle_req(BUS_ID_CRYPTO, state);
		break;
	case PD_SDCARD:
		pmu_bus_idle_req(BUS_ID_SDCARD, state);
		break;
	case PD_USB:
		pmu_bus_idle_req(BUS_ID_USB, state);
		break;
	default:
		break;
	}

	if (pd_state == pmu_pd_off)
		pmu_power_domain_ctr(pd_id, pd_state);

out:
	return 0;
}

static uint32_t pmu_powerdomain_state;

static void pmu_power_domains_suspend(void)
{
	uint32_t clkgt_save[CRU_CLKGATES_CON_CNT + CRU_PMU_CLKGATE_CON_CNT];

	clk_gate_con_save(clkgt_save);
	clk_gate_con_disable();
	qos_save();

	pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
	pmu_set_power_domain(PD_GPU, pmu_pd_off);
	pmu_set_power_domain(PD_VI, pmu_pd_off);
	pmu_set_power_domain(PD_VO, pmu_pd_off);
	pmu_set_power_domain(PD_VPU, pmu_pd_off);
	pmu_set_power_domain(PD_MMC_NAND, pmu_pd_off);
	pmu_set_power_domain(PD_GMAC, pmu_pd_off);
	pmu_set_power_domain(PD_CRYPTO, pmu_pd_off);
	pmu_set_power_domain(PD_SDCARD, pmu_pd_off);
	pmu_set_power_domain(PD_USB, pmu_pd_off);

	clk_gate_con_restore(clkgt_save);
}

static void pmu_power_domains_resume(void)
{
	uint32_t clkgt_save[CRU_CLKGATES_CON_CNT + CRU_PMU_CLKGATE_CON_CNT];

	clk_gate_con_save(clkgt_save);
	clk_gate_con_disable();

	if (!(pmu_powerdomain_state & BIT(PD_USB)))
		pmu_set_power_domain(PD_USB, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_SDCARD)))
		pmu_set_power_domain(PD_SDCARD, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_CRYPTO)))
		pmu_set_power_domain(PD_CRYPTO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
		pmu_set_power_domain(PD_GMAC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_MMC_NAND)))
		pmu_set_power_domain(PD_MMC_NAND, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VPU)))
		pmu_set_power_domain(PD_VPU, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VO)))
		pmu_set_power_domain(PD_VO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VI)))
		pmu_set_power_domain(PD_VI, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GPU)))
		pmu_set_power_domain(PD_GPU, pmu_pd_on);

	qos_restore();
	clk_gate_con_restore(clkgt_save);
}

static int check_cpu_wfie(uint32_t cpu)
{
	uint32_t loop = 0, wfie_msk = CKECK_WFEI_MSK << cpu;

	while (!(mmio_read_32(GRF_BASE + GRF_CPU_STATUS1) & wfie_msk) &&
	       (loop < WFEI_CHECK_LOOP)) {
		udelay(1);
		loop++;
	}

	if ((mmio_read_32(GRF_BASE + GRF_CPU_STATUS1) & wfie_msk) == 0) {
		WARN("%s: %d, %d, error!\n", __func__, cpu, wfie_msk);
		return -EINVAL;
	}

	return 0;
}

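/*
 * Power a secondary core back on. If it was taken down via its power domain
 * (core_pwr_pd), toggle PMU_PWRDN_CON directly; otherwise the core parked
 * itself in wfi, so wait for the PMU to report it down and then request a
 * soft wakeup through PMU_CPUAPM_CON.
 */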
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cpu_pd, apm_value, cfg_info, loop = 0;

	cpu_pd = PD_CPU0 + cpu_id;
	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      WITH_16BITS_WMSK(CORES_PM_DISABLE));
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
				      WITH_16BITS_WMSK(CORES_PM_DISABLE));
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}
		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		/* wait for the cpu to power down */
		while (pmu_power_domain_st(cpu_pd) == pmu_pd_on && loop < 100) {
			udelay(2);
			loop++;
		}

		/* return an error if the cpu did not power down */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s:can't wait cpu down\n", __func__);
			return -EINVAL;
		}

		/* the cpu is in the apm power-down state; soft-wake it up */
		apm_value = BIT(core_pm_sft_wakeup_en);
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      WITH_16BITS_WMSK(apm_value));
	}

	return 0;
}

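/*
 * Take a core's power domain down. core_pwr_pd removes power immediately
 * (after confirming the core is in wfi/wfe); the wfi variants only arm the
 * automatic power management, so the domain drops once the core executes
 * wfi, with core_pwr_wfi_int additionally keeping interrupts as a wakeup
 * source.
 */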
static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd, apm_value;

	cpu_pd = PD_CPU0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		if (check_cpu_wfie(cpu_id))
			return -EINVAL;
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      WITH_16BITS_WMSK(CORES_PM_DISABLE));
		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
		apm_value = BIT(core_pm_en) | BIT(core_pm_dis_int);
		if (pd_cfg == core_pwr_wfi_int)
			apm_value |= BIT(core_pm_int_wakeup_en);
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      WITH_16BITS_WMSK(apm_value));
	}

	return 0;
}

static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, cpu;

	boot_cpu = plat_my_core_pos();

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
		if (cpu == boot_cpu)
			continue;
		cpus_power_domain_off(cpu, core_pwr_pd);
	}
}

int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr,
				 uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return PSCI_E_SUCCESS;
}

int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
		      WITH_16BITS_WMSK(CORES_PM_DISABLE));
	return PSCI_E_SUCCESS;
}

int rockchip_soc_cores_pwr_dm_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return PSCI_E_SUCCESS;
}

int rockchip_soc_cores_pwr_dm_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return PSCI_E_SUCCESS;
}

int rockchip_soc_cores_pwr_dm_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	/* Disable core_pm */
	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
		      WITH_16BITS_WMSK(CORES_PM_DISABLE));

	return PSCI_E_SUCCESS;
}

#define CLK_MSK_GATING(msk, con) \
	mmio_write_32(CRU_BASE + (con), ((msk) << 16) | 0xffff)
#define CLK_MSK_UNGATING(msk, con) \
	mmio_write_32(CRU_BASE + (con), ((~(msk)) << 16) | 0xffff)

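/*
 * Per-register masks of clocks that must remain ungated (enabled) across
 * suspend, one 16-bit mask per CRU_CLKGATES_CON / CRU_PMU_CLKGATE_CON
 * register; every other clock is gated by clk_gate_suspend().
 */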
static uint32_t clk_ungt_msk[CRU_CLKGATES_CON_CNT] = {
	0xe0ff, 0xffff, 0x0000, 0x0000,
	0x0000, 0x0380, 0x0000, 0x0000,
	0x07c0, 0x0000, 0x0000, 0x000f,
	0x0061, 0x1f02, 0x0440, 0x1801,
	0x004b, 0x0000
};

static uint32_t clk_pmu_ungt_msk[CRU_PMU_CLKGATE_CON_CNT] = {
	0xf1ff, 0x0310
};

void clk_gate_suspend(void)
{
	int i;

	for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) {
		ddr_data.cru_clk_gate[i] =
			mmio_read_32(CRU_BASE + CRU_CLKGATES_CON(i));
		mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i),
			      WITH_16BITS_WMSK(~clk_ungt_msk[i]));
	}

	for (i = 0; i < CRU_PMU_CLKGATE_CON_CNT; i++) {
		ddr_data.cru_pmu_clk_gate[i] =
			mmio_read_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i));
		mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i),
			      WITH_16BITS_WMSK(~clk_pmu_ungt_msk[i]));
	}
}

void clk_gate_resume(void)
{
	int i;

	for (i = 0; i < CRU_PMU_CLKGATE_CON_CNT; i++)
		mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i),
			      WITH_16BITS_WMSK(ddr_data.cru_pmu_clk_gate[i]));

	for (i = 0; i < CRU_CLKGATES_CON_CNT; i++)
		mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i),
			      WITH_16BITS_WMSK(ddr_data.cru_clk_gate[i]));
}

static void pvtm_32k_config(void)
{
	uint32_t pvtm_freq_khz, pvtm_div;

	ddr_data.pmu_cru_clksel_con0 =
		mmio_read_32(PMUCRU_BASE + CRU_PMU_CLKSELS_CON(0));

	ddr_data.pgrf_pvtm_con[0] =
		mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_CON0);
	ddr_data.pgrf_pvtm_con[1] =
		mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_CON1);

	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
		      BITS_WITH_WMASK(0, 0x3, pgrf_pvtm_st));
	dsb();
	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
		      BITS_WITH_WMASK(1, 0x1, pgrf_pvtm_en));
	dsb();
	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON1, PVTM_CALC_CNT);
	dsb();

	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
		      BITS_WITH_WMASK(1, 0x1, pgrf_pvtm_st));

	/* pmugrf_pvtm_st0 is cleared after the PVTM starts, which takes at
	 * least about 6 PVTM cycles, so wait 30 PVTM cycles to be safe.
	 */
	while (mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_ST1) < 30)
		;

	dsb();
	while (!(mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_ST0) & 0x1))
		;

	pvtm_freq_khz =
		(mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_ST1) * 24000 +
		 PVTM_CALC_CNT / 2) / PVTM_CALC_CNT;
	pvtm_div = (pvtm_freq_khz + 16) / 32;

	/* pvtm_div = (div_factor << 2) + 1, so div_factor = (pvtm_div - 1) >> 2.
	 * The ">> 2" operation already clears the low bits of pvtm_div, so the
	 * "- 1" compensation can be skipped.
	 */
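	/*
	 * Worked example with purely illustrative figures: if pvtm_freq_khz
	 * came out as 1600, then pvtm_div = (1600 + 16) / 32 = 50, the field
	 * programmed below is 50 >> 2 = 12, and the effective divider is
	 * (12 << 2) + 1 = 49, i.e. about 1600 / 49 ~= 32.7 kHz.
	 */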
	pvtm_div = pvtm_div >> 2;
	if (pvtm_div > 0x3f)
		pvtm_div = 0x3f;

	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
		      BITS_WITH_WMASK(pvtm_div, 0x3f, pgrf_pvtm_div));

	/* select pvtm as 32k source */
	mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKSELS_CON(0),
		      BITS_WITH_WMASK(1, 0x3U, 14));
}

static void pvtm_32k_config_restore(void)
{
	mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKSELS_CON(0),
		      ddr_data.pmu_cru_clksel_con0 | BITS_WMSK(0x3U, 14));

	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
		      WITH_16BITS_WMSK(ddr_data.pgrf_pvtm_con[0]));
	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON1,
		      ddr_data.pgrf_pvtm_con[1]);
}

static void ddr_sleep_config(void)
{
	/* disable ddr pd, sr */
	ddr_data.ddrc_pwrctrl = mmio_read_32(DDR_UPCTL_BASE + 0x30);
	mmio_write_32(DDR_UPCTL_BASE + 0x30, BITS_WITH_WMASK(0x0, 0x3, 0));

	/* disable ddr auto gt */
	ddr_data.ddrgrf_con1 = mmio_read_32(DDRGRF_BASE + 0x4);
	mmio_write_32(DDRGRF_BASE + 0x4, BITS_WITH_WMASK(0x0, 0x1f, 0));

	/* disable ddr standby */
	ddr_data.ddrstdby_con0 = mmio_read_32(DDR_STDBY_BASE + 0x0);
	mmio_write_32(DDR_STDBY_BASE + 0x0, BITS_WITH_WMASK(0x0, 0x1, 0));
	while ((mmio_read_32(DDR_UPCTL_BASE + 0x4) & 0x7) != 1)
		;

	/* ddr pmu ctrl */
	ddr_data.ddrgrf_con0 = mmio_read_32(DDRGRF_BASE + 0x0);
	mmio_write_32(DDRGRF_BASE + 0x0, BITS_WITH_WMASK(0x0, 0x1, 5));
	dsb();
	mmio_write_32(DDRGRF_BASE + 0x0, BITS_WITH_WMASK(0x1, 0x1, 4));

	/* ddr ret sel */
	ddr_data.pmugrf_soc_con0 =
		mmio_read_32(PMUGRF_BASE + PMUGRF_SOC_CON(0));
	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON(0),
		      BITS_WITH_WMASK(0x0, 0x1, 12));
}

static void ddr_sleep_config_restore(void)
{
	/* restore ddr ret sel */
	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON(0),
		      ddr_data.pmugrf_soc_con0 | BITS_WMSK(0x1, 12));

	/* restore ddr pmu ctrl */
	mmio_write_32(DDRGRF_BASE + 0x0,
		      ddr_data.ddrgrf_con0 | BITS_WMSK(0x1, 4));
	dsb();
	mmio_write_32(DDRGRF_BASE + 0x0,
		      ddr_data.ddrgrf_con0 | BITS_WMSK(0x1, 5));

	/* restore ddr standby */
	mmio_write_32(DDR_STDBY_BASE + 0x0,
		      ddr_data.ddrstdby_con0 | BITS_WMSK(0x1, 0));

	/* restore ddr auto gt */
	mmio_write_32(DDRGRF_BASE + 0x4,
		      ddr_data.ddrgrf_con1 | BITS_WMSK(0x1f, 0));

	/* restore ddr pd, sr */
	mmio_write_32(DDR_UPCTL_BASE + 0x30,
		      ddr_data.ddrc_pwrctrl | BITS_WMSK(0x3, 0));
}

static void pmu_sleep_config(void)
{
	uint32_t pwrmd_core_lo, pwrmd_core_hi, pwrmd_com_lo, pwrmd_com_hi;
	uint32_t pmu_wkup_cfg2_lo;
	uint32_t clk_freq_khz;

	/* save pmic_sleep iomux gpio0_a4 */
	ddr_data.pmic_slp_iomux = mmio_read_32(PMUGRF_BASE + GPIO0A_IOMUX);

	ddr_data.pmu_pwrmd_core_l =
		mmio_read_32(PMU_BASE + PMU_PWRMODE_CORE_LO);
	ddr_data.pmu_pwrmd_core_h =
		mmio_read_32(PMU_BASE + PMU_PWRMODE_CORE_HI);
	ddr_data.pmu_pwrmd_cmm_l =
		mmio_read_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_LO);
	ddr_data.pmu_pwrmd_cmm_h =
		mmio_read_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_HI);
	ddr_data.pmu_wkup_cfg2_l = mmio_read_32(PMU_BASE + PMU_WKUP_CFG2_LO);

	pwrmd_core_lo = BIT(pmu_global_int_dis) |
			BIT(pmu_core_src_gt) |
			BIT(pmu_cpu0_pd) |
			BIT(pmu_clr_core) |
			BIT(pmu_scu_pd) |
			BIT(pmu_l2_idle) |
			BIT(pmu_l2_flush) |
			BIT(pmu_clr_bus2main) |
			BIT(pmu_clr_peri2msch);

	pwrmd_core_hi = BIT(pmu_dpll_pd_en) |
			BIT(pmu_apll_pd_en) |
			BIT(pmu_cpll_pd_en) |
			BIT(pmu_gpll_pd_en) |
			BIT(pmu_npll_pd_en);

	pwrmd_com_lo = BIT(pmu_mode_en) |
		       BIT(pmu_pll_pd) |
		       BIT(pmu_pmu_use_if) |
		       BIT(pmu_alive_use_if) |
		       BIT(pmu_osc_dis) |
		       BIT(pmu_sref_enter) |
		       BIT(pmu_ddrc_gt) |
		       BIT(pmu_clr_pmu) |
		       BIT(pmu_clr_peri_pmu);

	pwrmd_com_hi = BIT(pmu_clr_bus) |
		       BIT(pmu_clr_msch) |
		       BIT(pmu_wakeup_begin_cfg);

	pmu_wkup_cfg2_lo = BIT(pmu_cluster_wkup_en) |
			   BIT(pmu_gpio_wkup_en) |
			   BIT(pmu_timer_wkup_en);

	/* set pmic_sleep iomux gpio0_a4 */
	mmio_write_32(PMUGRF_BASE + GPIO0A_IOMUX,
		      BITS_WITH_WMASK(1, 0x3, 8));

	clk_freq_khz = 32;

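	/*
	 * The counters below are clocked by the ~32 kHz sleep clock, so
	 * clk_freq_khz * 32 = 1024 ticks is roughly a 32 ms interval and
	 * clk_freq_khz * 2 roughly 2 ms.
	 */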
	mmio_write_32(PMU_BASE + PMU_OSC_CNT_LO,
		      WITH_16BITS_WMSK(clk_freq_khz * 32 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_OSC_CNT_HI,
		      WITH_16BITS_WMSK(clk_freq_khz * 32 >> 16));

	mmio_write_32(PMU_BASE + PMU_STABLE_CNT_LO,
		      WITH_16BITS_WMSK(clk_freq_khz * 32 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT_HI,
		      WITH_16BITS_WMSK(clk_freq_khz * 32 >> 16));

	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_LO,
		      WITH_16BITS_WMSK(clk_freq_khz * 2 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_HI,
		      WITH_16BITS_WMSK(clk_freq_khz * 2 >> 16));

	/* The PMU clock has already switched back to 24M by the time the PMU
	 * FSM counts the following counters, so use 24M to calculate them.
	 */
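	/* e.g. 24000 * 2 = 48000 cycles at 24 MHz is a 2 ms window, and
	 * 24000 * 5 a 5 ms PLL-lock window.
	 */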
	mmio_write_32(PMU_BASE + PMU_SCU_PWRDN_CNT_LO,
		      WITH_16BITS_WMSK(24000 * 2 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_SCU_PWRDN_CNT_HI,
		      WITH_16BITS_WMSK(24000 * 2 >> 16));

	mmio_write_32(PMU_BASE + PMU_SCU_PWRUP_CNT_LO,
		      WITH_16BITS_WMSK(24000 * 2 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_SCU_PWRUP_CNT_HI,
		      WITH_16BITS_WMSK(24000 * 2 >> 16));

	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT_LO,
		      WITH_16BITS_WMSK(24000 * 5 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT_HI,
		      WITH_16BITS_WMSK(24000 * 5 >> 16));

	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT_LO,
		      WITH_16BITS_WMSK(24000 * 2 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT_HI,
		      WITH_16BITS_WMSK(24000 * 2 >> 16));

	/* Config pmu power mode and pmu wakeup source */
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_LO,
		      WITH_16BITS_WMSK(pwrmd_core_lo));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_HI,
		      WITH_16BITS_WMSK(pwrmd_core_hi));

	mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_LO,
		      WITH_16BITS_WMSK(pwrmd_com_lo));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_HI,
		      WITH_16BITS_WMSK(pwrmd_com_hi));

	mmio_write_32(PMU_BASE + PMU_WKUP_CFG2_LO,
		      WITH_16BITS_WMSK(pmu_wkup_cfg2_lo));
}

static void pmu_sleep_restore(void)
{
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_LO,
		      WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_core_l));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_HI,
		      WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_core_h));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_LO,
		      WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_cmm_l));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_HI,
		      WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_cmm_h));
	mmio_write_32(PMU_BASE + PMU_WKUP_CFG2_LO,
		      WITH_16BITS_WMSK(ddr_data.pmu_wkup_cfg2_l));

	/* restore pmic_sleep iomux */
	mmio_write_32(PMUGRF_BASE + GPIO0A_IOMUX,
		      WITH_16BITS_WMSK(ddr_data.pmic_slp_iomux));
}

static void soc_sleep_config(void)
{
	ddr_data.gpio0c_iomux = mmio_read_32(PMUGRF_BASE + GPIO0C_IOMUX);

	pmu_sleep_config();

	ddr_sleep_config();

	pvtm_32k_config();
}

static void soc_sleep_restore(void)
{
	secure_timer_init();

	pvtm_32k_config_restore();

	ddr_sleep_config_restore();

	pmu_sleep_restore();

	mmio_write_32(PMUGRF_BASE + GPIO0C_IOMUX,
		      WITH_16BITS_WMSK(ddr_data.gpio0c_iomux));
}

static inline void pm_pll_wait_lock(uint32_t pll_base, uint32_t pll_id)
{
	uint32_t delay = PLL_LOCKED_TIMEOUT;

	while (delay > 0) {
		if (mmio_read_32(pll_base + PLL_CON(1)) &
		    PLL_LOCK_MSK)
			break;
		delay--;
	}

	if (delay == 0)
		ERROR("Can't wait pll:%d lock\n", pll_id);
}

static inline void pll_pwr_ctr(uint32_t pll_base, uint32_t pll_id, uint32_t pd)
{
	mmio_write_32(pll_base + PLL_CON(1),
		      BITS_WITH_WMASK(1, 1U, 15));
	if (pd)
		mmio_write_32(pll_base + PLL_CON(1),
			      BITS_WITH_WMASK(1, 1, 14));
	else
		mmio_write_32(pll_base + PLL_CON(1),
			      BITS_WITH_WMASK(0, 1, 14));
}

static inline void pll_set_mode(uint32_t pll_id, uint32_t mode)
{
	uint32_t val = BITS_WITH_WMASK(mode, 0x3, PLL_MODE_SHIFT(pll_id));

	if (pll_id != GPLL_ID)
		mmio_write_32(CRU_BASE + CRU_MODE, val);
	else
		mmio_write_32(PMUCRU_BASE + CRU_PMU_MODE,
			      BITS_WITH_WMASK(mode, 0x3, 0));
}

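/*
 * The GPLL is controlled from the PMU CRU while the other PLLs live in the
 * main CRU, hence the different register bases and mode fields below. All
 * PLLs are parked in slow mode across suspend and restored to their saved
 * mode on resume.
 */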
static inline void pll_suspend(uint32_t pll_id)
{
	int i;
	uint32_t pll_base;

	if (pll_id != GPLL_ID)
		pll_base = CRU_BASE + CRU_PLL_CONS(pll_id, 0);
	else
		pll_base = PMUCRU_BASE + CRU_PLL_CONS(0, 0);

	/* save pll con */
	for (i = 0; i < PLL_CON_CNT; i++)
		ddr_data.cru_plls_con_save[pll_id][i] =
			mmio_read_32(pll_base + PLL_CON(i));

	/* slow mode */
	pll_set_mode(pll_id, SLOW_MODE);
}

static inline void pll_resume(uint32_t pll_id)
{
	uint32_t mode, pll_base;

	if (pll_id != GPLL_ID) {
		pll_base = CRU_BASE + CRU_PLL_CONS(pll_id, 0);
		mode = (ddr_data.cru_mode_save >> PLL_MODE_SHIFT(pll_id)) & 0x3;
	} else {
		pll_base = PMUCRU_BASE + CRU_PLL_CONS(0, 0);
		mode = ddr_data.cru_pmu_mode_save & 0x3;
	}

	/* if the pll was locked before suspend, wait for it to relock on resume */
	if (ddr_data.cru_plls_con_save[pll_id][1] & PLL_LOCK_MSK)
		pm_pll_wait_lock(pll_base, pll_id);

	pll_set_mode(pll_id, mode);
}

static void pm_plls_suspend(void)
{
	ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_MODE);
	ddr_data.cru_pmu_mode_save = mmio_read_32(PMUCRU_BASE + CRU_PMU_MODE);
	ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSELS_CON(0));

	pll_suspend(GPLL_ID);
	pll_suspend(NPLL_ID);
	pll_suspend(CPLL_ID);
	pll_suspend(APLL_ID);

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0),
		      BITS_WITH_WMASK(0, 0xf, 0));

	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0),
		      BITS_WITH_WMASK(0, 0xf, 8));
}

static void pm_plls_resume(void)
{
	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0),
		      ddr_data.clk_sel0 | BITS_WMSK(0xf, 8));

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0),
		      ddr_data.clk_sel0 | BITS_WMSK(0xf, 0));

	pll_resume(APLL_ID);
	pll_resume(CPLL_ID);
	pll_resume(NPLL_ID);
	pll_resume(GPLL_ID);
}

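/*
 * System suspend entry: quiesce the peripheral power domains, gate the
 * clocks, program the PMU/DDR/PVTM sleep state and park the PLLs.
 * PM_WARM_BOOT_BIT in the shared PSRAM block is cleared on the way down and
 * set again at the start of the resume path.
 */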
int rockchip_soc_sys_pwr_dm_suspend(void)
{
	pmu_power_domains_suspend();

	clk_gate_suspend();

	soc_sleep_config();

	pm_plls_suspend();

	psram_boot_cfg->pm_flag &= ~PM_WARM_BOOT_BIT;

	return 0;
}

int rockchip_soc_sys_pwr_dm_resume(void)
{
	psram_boot_cfg->pm_flag |= PM_WARM_BOOT_BIT;

	pm_plls_resume();

	soc_sleep_restore();

	clk_gate_resume();

	pmu_power_domains_resume();

	plat_rockchip_gic_cpuif_enable();

	return 0;
}

void __dead2 rockchip_soc_soft_reset(void)
{
	pll_set_mode(GPLL_ID, SLOW_MODE);
	pll_set_mode(CPLL_ID, SLOW_MODE);
	pll_set_mode(NPLL_ID, SLOW_MODE);
	pll_set_mode(APLL_ID, SLOW_MODE);
	dsb();

	mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE);
	dsb();

	/*
	 * The HW may need some time to reset the system, so don't let the
	 * core continue executing code while the reset takes effect.
	 */
	psci_power_down_wfi();
}

void __dead2 rockchip_soc_system_off(void)
{
	uint32_t val;

	/* set pmic_sleep pin (gpio0_a4) to gpio mode */
	mmio_write_32(PMUGRF_BASE + GPIO0A_IOMUX, BITS_WITH_WMASK(0, 0x3, 8));

	/* config output */
	val = mmio_read_32(GPIO0_BASE + SWPORTA_DDR);
	val |= BIT(4);
	mmio_write_32(GPIO0_BASE + SWPORTA_DDR, val);

	/* config output high level */
	val = mmio_read_32(GPIO0_BASE);
	val |= BIT(4);
	mmio_write_32(GPIO0_BASE, val);
	dsb();

	/*
	 * The HW may need some time to act on the power-off request, so
	 * don't let the core continue executing code in the meantime.
	 */
	psci_power_down_wfi();
}

void rockchip_plat_mmu_el3(void)
{
	/* TODO: support EL3 MMU setup for px30 SoCs */
}

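/*
 * BL31-time PMU setup: initialise the power-domain lock and CPU hotplug
 * flags, fill in the PSRAM boot block (stack top, boot MPIDR, warm-boot
 * flag), park the non-boot cores and remap the PMU SRAM to the boot address
 * so that warm-boot entry lands in it.
 */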
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	rockchip_pd_lock_init();

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	psram_boot_cfg->ddr_func = (uint64_t)0;
	psram_boot_cfg->ddr_data = (uint64_t)0;
	psram_boot_cfg->sp = PSRAM_SP_TOP;
	psram_boot_cfg->ddr_flag = 0x0;
	psram_boot_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;
	psram_boot_cfg->pm_flag = PM_WARM_BOOT_BIT;

	nonboot_cpus_off();

	/* Remap pmu_sram's base address to boot address */
	mmio_write_32(PMUSGRF_BASE + PMUSGRF_SOC_CON(0),
		      BITS_WITH_WMASK(1, 0x1, 13));

	INFO("%s: pd status %x\n",
	     __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}