blob: e5e680511a15707f95da7c5b64d77d743683088d [file] [log] [blame]
Tony Xief6118cc2016-01-15 17:17:32 +08001/*
2 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
3 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Tony Xief6118cc2016-01-15 17:17:32 +08005 */
6
7#include <arch_helpers.h>
8#include <assert.h>
9#include <debug.h>
10#include <delay_timer.h>
11#include <errno.h>
12#include <mmio.h>
13#include <platform.h>
14#include <platform_def.h>
15#include <plat_private.h>
16#include <rk3368_def.h>
17#include <pmu_sram.h>
18#include <soc.h>
19#include <pmu.h>
20#include <ddr_rk3368.h>
21#include <pmu_com.h>
22
/* Serializes PMU power-domain register updates across CPUs. */
DEFINE_BAKERY_LOCK(rockchip_pd_lock);

/* Suspend/resume parameter block shared with the code copied into
 * PMU SRAM (warm-boot entry, DDR resume function/data, stack top). */
static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;

/* Warm-boot entry address, held in 32 bits because the SGRF boot
 * address registers take a 32-bit (shifted) value — see
 * plat_rockchip_pmu_init()/rockchip_soc_cores_pwr_dm_on(). */
static uint32_t cpu_warm_boot_addr;
29
Tony Xief6118cc2016-01-15 17:17:32 +080030void rk3368_flash_l2_b(void)
31{
32 uint32_t wait_cnt = 0;
33
34 regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
35 dsb();
36
37 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)
38 & BIT(clst_b_l2_flsh_done))) {
39 wait_cnt++;
40 if (!(wait_cnt % MAX_WAIT_CONUT))
41 WARN("%s:reg %x,wait\n", __func__,
42 mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
43 }
44
45 regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
46}
47
48static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle)
49{
50 uint32_t mask = BIT(req);
51 uint32_t idle_mask = 0;
52 uint32_t idle_target = 0;
53 uint32_t val;
54 uint32_t wait_cnt = 0;
55
56 switch (req) {
57 case bus_ide_req_clst_l:
58 idle_mask = BIT(pmu_idle_ack_cluster_l);
59 idle_target = (idle << pmu_idle_ack_cluster_l);
60 break;
61
62 case bus_ide_req_clst_b:
63 idle_mask = BIT(pmu_idle_ack_cluster_b);
64 idle_target = (idle << pmu_idle_ack_cluster_b);
65 break;
66
67 case bus_ide_req_cxcs:
68 idle_mask = BIT(pmu_idle_ack_cxcs);
69 idle_target = ((!idle) << pmu_idle_ack_cxcs);
70 break;
71
72 case bus_ide_req_cci400:
73 idle_mask = BIT(pmu_idle_ack_cci400);
74 idle_target = ((!idle) << pmu_idle_ack_cci400);
75 break;
76
77 case bus_ide_req_gpu:
78 idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
79 idle_target = (idle << pmu_idle_ack_gpu) |
80 (idle << pmu_idle_gpu);
81 break;
82
83 case bus_ide_req_core:
84 idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
85 idle_target = (idle << pmu_idle_ack_core) |
86 (idle << pmu_idle_core);
87 break;
88
89 case bus_ide_req_bus:
90 idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
91 idle_target = (idle << pmu_idle_ack_bus) |
92 (idle << pmu_idle_bus);
93 break;
94 case bus_ide_req_dma:
95 idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
96 idle_target = (idle << pmu_idle_ack_dma) |
97 (idle << pmu_idle_dma);
98 break;
99
100 case bus_ide_req_peri:
101 idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
102 idle_target = (idle << pmu_idle_ack_peri) |
103 (idle << pmu_idle_peri);
104 break;
105
106 case bus_ide_req_video:
107 idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
108 idle_target = (idle << pmu_idle_ack_video) |
109 (idle << pmu_idle_video);
110 break;
111
112 case bus_ide_req_vio:
113 idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
114 idle_target = (pmu_idle_ack_vio) |
115 (idle << pmu_idle_vio);
116 break;
117
118 case bus_ide_req_alive:
119 idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
120 idle_target = (idle << pmu_idle_ack_alive) |
121 (idle << pmu_idle_alive);
122 break;
123
124 case bus_ide_req_pmu:
125 idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu);
126 idle_target = (idle << pmu_idle_ack_pmu) |
127 (idle << pmu_idle_pmu);
128 break;
129
130 case bus_ide_req_msch:
131 idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch);
132 idle_target = (idle << pmu_idle_ack_msch) |
133 (idle << pmu_idle_msch);
134 break;
135
136 case bus_ide_req_cci:
137 idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci);
138 idle_target = (idle << pmu_idle_ack_cci) |
139 (idle << pmu_idle_cci);
140 break;
141
142 default:
143 ERROR("%s: Unsupported the idle request\n", __func__);
144 break;
145 }
146
147 val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
148 if (idle)
149 val |= mask;
150 else
151 val &= ~mask;
152
153 mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);
154
155 while ((mmio_read_32(PMU_BASE +
156 PMU_BUS_IDE_ST) & idle_mask) != idle_target) {
157 wait_cnt++;
158 if (!(wait_cnt % MAX_WAIT_CONUT))
159 WARN("%s:st=%x(%x)\n", __func__,
160 mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
161 idle_mask);
162 }
163
164 return 0;
165}
166
/*
 * Bring the big cluster's SCU back up: deassert ACINACTM for cluster B,
 * then release the cluster's bus-idle request. Inverse of
 * pmu_scu_b_pwrdn(); ordering of the two steps matters.
 */
void pmu_scu_b_pwrup(void)
{
	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);
}
172
/*
 * Power down the big cluster's SCU. Preconditions: every big-cluster
 * CPU must already be powered off (checked against PMU_PWRDN_ST).
 * Sequence: flush L2, assert ACINACTM, wait for the L2 WFI status,
 * then idle the cluster's bus interface. Step order is fixed by the
 * hardware handshake — do not reorder.
 */
static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	/* Refuse to proceed unless all big-cluster cores report off. */
	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) {
		ERROR("%s: not all cpus is off\n", __func__);
		return;
	}

	rk3368_flash_l2_b();

	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);

	/* Wait until the cluster-B L2 has entered WFI. */
	while (!(mmio_read_32(PMU_BASE +
	       PMU_CORE_PWR_ST) & BIT(clst_b_l2_wfi))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);
}
196
197static void pmu_sleep_mode_config(void)
198{
199 uint32_t pwrmd_core, pwrmd_com;
200
201 pwrmd_core = BIT(pmu_mdcr_cpu0_pd) |
202 BIT(pmu_mdcr_scu_l_pd) |
203 BIT(pmu_mdcr_l2_flush) |
204 BIT(pmu_mdcr_l2_idle) |
205 BIT(pmu_mdcr_clr_clst_l) |
206 BIT(pmu_mdcr_clr_core) |
207 BIT(pmu_mdcr_clr_cci) |
208 BIT(pmu_mdcr_core_pd);
209
210 pwrmd_com = BIT(pmu_mode_en) |
211 BIT(pmu_mode_sref_enter) |
212 BIT(pmu_mode_pwr_off);
213
214 regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en);
215 regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en);
216 regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en);
217
218 mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2));
219 mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100));
220 mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2));
221 mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core);
222 mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com);
223 dsb();
224}
225
/*
 * Save the DDR controller registers into the PMU SRAM data area so the
 * resume code can restore them after power-off.
 */
static void ddr_suspend_save(void)
{
	ddr_reg_save(1, psram_sleep_cfg->ddr_data);
}
230
/*
 * Prepare the SoC for system suspend: save DDR state, program the PMU
 * sleep configuration, apply SoC-level sleep settings, disable global
 * interrupts at the PMU, power down the big cluster's SCU, and point
 * both clusters' warm-boot addresses at PMU SRAM. The step order is
 * part of the suspend handshake — do not reorder.
 */
static void pmu_set_sleep_mode(void)
{
	ddr_suspend_save();
	pmu_sleep_mode_config();
	soc_sleep_config();
	regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis);
	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b);
	pmu_scu_b_pwrdn();
	/* Warm-boot (resume) entry for both clusters = PMU SRAM base. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
}
246
247void plat_rockchip_pmusram_prepare(void)
248{
249 uint32_t *sram_dst, *sram_src;
250 size_t sram_size = 2;
Soby Mathew940f41b2016-04-12 14:04:29 +0100251 uint32_t code_size;
Tony Xief6118cc2016-01-15 17:17:32 +0800252
253 /* pmu sram code and data prepare */
254 sram_dst = (uint32_t *)PMUSRAM_BASE;
255 sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
256 sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
257 (uint32_t *)sram_src;
258 u32_align_cpy(sram_dst, sram_src, sram_size);
259
260 /* ddr code */
261 sram_dst += sram_size;
262 sram_src = ddr_get_resume_code_base();
263 code_size = ddr_get_resume_code_size();
264 u32_align_cpy(sram_dst, sram_src, code_size / 4);
265 psram_sleep_cfg->ddr_func = (uint64_t)sram_dst;
266
267 /* ddr data */
268 sram_dst += (code_size / 4);
Tony Xief6118cc2016-01-15 17:17:32 +0800269 psram_sleep_cfg->ddr_data = (uint64_t)sram_dst;
270
Soby Mathew940f41b2016-04-12 14:04:29 +0100271 assert((uint64_t)(sram_dst + ddr_get_resume_data_size() / 4)
272 < PSRAM_SP_BOTTOM);
Tony Xief6118cc2016-01-15 17:17:32 +0800273 psram_sleep_cfg->sp = PSRAM_SP_TOP;
274}
275
276static int cpus_id_power_domain(uint32_t cluster,
277 uint32_t cpu,
278 uint32_t pd_state,
279 uint32_t wfie_msk)
280{
281 uint32_t pd;
282 uint64_t mpidr;
283
284 if (cluster)
285 pd = PD_CPUB0 + cpu;
286 else
287 pd = PD_CPUL0 + cpu;
288
289 if (pmu_power_domain_st(pd) == pd_state)
290 return 0;
291
292 if (pd_state == pmu_pd_off) {
293 mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu;
294 if (check_cpu_wfie(mpidr, wfie_msk))
295 return -EINVAL;
296 }
297
298 return pmu_power_domain_ctr(pd, pd_state);
299}
300
301static void nonboot_cpus_off(void)
302{
303 uint32_t boot_cpu, boot_cluster, cpu;
304
305 boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
306 boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());
307
308 /* turn off noboot cpus */
309 for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
310 if (!boot_cluster && (cpu == boot_cpu))
311 continue;
312 cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
313 }
314
315 for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
316 if (boot_cluster && (cpu == boot_cpu))
317 continue;
318 cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
319 }
320}
321
/*
 * PSCI CPU_ON backend: power up the core identified by mpidr and have
 * it enter at `entrypoint`. The warm-boot vector is temporarily
 * switched to PMU SRAM while the core comes up, then restored to the
 * cold-boot address.
 *
 * Returns 0. (Failures of the intermediate power-domain ops are not
 * propagated — matches the other Rockchip ports.)
 */
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu, cluster;
	uint32_t cpuon_id;

	cpu = MPIDR_AFFLVL0_VAL(mpidr);
	cluster = MPIDR_AFFLVL1_VAL(mpidr);

	/* Make sure the cpu is off,Before power up the cpu! */
	cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);

	/* Linear core index used for the SRAM handshake arrays. */
	cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
	assert(cpuon_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpuon_id] == 0);
	cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpuon_id] = entrypoint;

	/* Switch boot addr to pmusram */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	dsb();

	cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);

	/* Restore the cold-boot vector for subsequent resets. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	return 0;
}
353
/*
 * PSCI CPU_ON finisher — no per-core cleanup is needed on RK3368.
 */
int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	return 0;
}
358
/*
 * System resume path: restore both clusters' boot vectors to the
 * cold-boot address, bring the PLLs back from their suspend state,
 * and power the big cluster's SCU back up.
 */
int rockchip_soc_sys_pwr_dm_resume(void)
{
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	pm_plls_resume();
	pmu_scu_b_pwrup();

	return 0;
}
372
/*
 * System suspend path: power off all non-boot CPUs, then run the full
 * PMU sleep-entry sequence. ddr_flag = 0 tells the SRAM resume code
 * which DDR restore variant to run — confirm against pmu_sram usage.
 */
int rockchip_soc_sys_pwr_dm_suspend(void)
{
	nonboot_cpus_off();
	pmu_set_sleep_mode();

	psram_sleep_cfg->ddr_flag = 0;

	return 0;
}
382
/*
 * Platform PMU init: record the warm-boot entry (truncated to 32 bits
 * for the SGRF boot-address register), clear the per-core hotplug
 * flags, publish the boot CPU's MPIDR to the SRAM handshake block,
 * and power off all non-boot CPUs.
 */
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	/* The SGRF boot-address register holds a 32-bit value, so the
	 * 64-bit entry point is narrowed here (see the boot-addr writes). */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	/* Low 16 bits of MPIDR (Aff1:Aff0) identify the boot CPU. */
	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	nonboot_cpus_off();
	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}
398}