blob: cb323e6ee931a55885b99468ce707536f2f64374 [file] [log] [blame]
Tony Xief6118cc2016-01-15 17:17:32 +08001/*
2 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
3 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Tony Xief6118cc2016-01-15 17:17:32 +08005 */
6
7#include <arch_helpers.h>
8#include <assert.h>
Isla Mitchelle3631462017-07-14 10:46:32 +01009#include <ddr_rk3368.h>
Tony Xief6118cc2016-01-15 17:17:32 +080010#include <debug.h>
11#include <delay_timer.h>
12#include <errno.h>
13#include <mmio.h>
Isla Mitchelle3631462017-07-14 10:46:32 +010014#include <plat_private.h>
Tony Xief6118cc2016-01-15 17:17:32 +080015#include <platform.h>
16#include <platform_def.h>
Tony Xief6118cc2016-01-15 17:17:32 +080017#include <pmu.h>
Tony Xief6118cc2016-01-15 17:17:32 +080018#include <pmu_com.h>
Isla Mitchelle3631462017-07-14 10:46:32 +010019#include <rk3368_def.h>
20#include <soc.h>
Tony Xief6118cc2016-01-15 17:17:32 +080021
Tony Xie42e113e2016-07-16 11:16:51 +080022DEFINE_BAKERY_LOCK(rockchip_pd_lock);
23
Caesar Wang59e41b52016-04-10 14:11:07 +080024static uint32_t cpu_warm_boot_addr;
25
Tony Xief6118cc2016-01-15 17:17:32 +080026void rk3368_flash_l2_b(void)
27{
28 uint32_t wait_cnt = 0;
29
30 regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
31 dsb();
32
33 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)
34 & BIT(clst_b_l2_flsh_done))) {
35 wait_cnt++;
36 if (!(wait_cnt % MAX_WAIT_CONUT))
37 WARN("%s:reg %x,wait\n", __func__,
38 mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
39 }
40
41 regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
42}
43
44static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle)
45{
46 uint32_t mask = BIT(req);
47 uint32_t idle_mask = 0;
48 uint32_t idle_target = 0;
49 uint32_t val;
50 uint32_t wait_cnt = 0;
51
52 switch (req) {
53 case bus_ide_req_clst_l:
54 idle_mask = BIT(pmu_idle_ack_cluster_l);
55 idle_target = (idle << pmu_idle_ack_cluster_l);
56 break;
57
58 case bus_ide_req_clst_b:
59 idle_mask = BIT(pmu_idle_ack_cluster_b);
60 idle_target = (idle << pmu_idle_ack_cluster_b);
61 break;
62
63 case bus_ide_req_cxcs:
64 idle_mask = BIT(pmu_idle_ack_cxcs);
65 idle_target = ((!idle) << pmu_idle_ack_cxcs);
66 break;
67
68 case bus_ide_req_cci400:
69 idle_mask = BIT(pmu_idle_ack_cci400);
70 idle_target = ((!idle) << pmu_idle_ack_cci400);
71 break;
72
73 case bus_ide_req_gpu:
74 idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
75 idle_target = (idle << pmu_idle_ack_gpu) |
76 (idle << pmu_idle_gpu);
77 break;
78
79 case bus_ide_req_core:
80 idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
81 idle_target = (idle << pmu_idle_ack_core) |
82 (idle << pmu_idle_core);
83 break;
84
85 case bus_ide_req_bus:
86 idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
87 idle_target = (idle << pmu_idle_ack_bus) |
88 (idle << pmu_idle_bus);
89 break;
90 case bus_ide_req_dma:
91 idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
92 idle_target = (idle << pmu_idle_ack_dma) |
93 (idle << pmu_idle_dma);
94 break;
95
96 case bus_ide_req_peri:
97 idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
98 idle_target = (idle << pmu_idle_ack_peri) |
99 (idle << pmu_idle_peri);
100 break;
101
102 case bus_ide_req_video:
103 idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
104 idle_target = (idle << pmu_idle_ack_video) |
105 (idle << pmu_idle_video);
106 break;
107
108 case bus_ide_req_vio:
109 idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
110 idle_target = (pmu_idle_ack_vio) |
111 (idle << pmu_idle_vio);
112 break;
113
114 case bus_ide_req_alive:
115 idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
116 idle_target = (idle << pmu_idle_ack_alive) |
117 (idle << pmu_idle_alive);
118 break;
119
120 case bus_ide_req_pmu:
121 idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu);
122 idle_target = (idle << pmu_idle_ack_pmu) |
123 (idle << pmu_idle_pmu);
124 break;
125
126 case bus_ide_req_msch:
127 idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch);
128 idle_target = (idle << pmu_idle_ack_msch) |
129 (idle << pmu_idle_msch);
130 break;
131
132 case bus_ide_req_cci:
133 idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci);
134 idle_target = (idle << pmu_idle_ack_cci) |
135 (idle << pmu_idle_cci);
136 break;
137
138 default:
139 ERROR("%s: Unsupported the idle request\n", __func__);
140 break;
141 }
142
143 val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
144 if (idle)
145 val |= mask;
146 else
147 val &= ~mask;
148
149 mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);
150
151 while ((mmio_read_32(PMU_BASE +
152 PMU_BUS_IDE_ST) & idle_mask) != idle_target) {
153 wait_cnt++;
154 if (!(wait_cnt % MAX_WAIT_CONUT))
155 WARN("%s:st=%x(%x)\n", __func__,
156 mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
157 idle_mask);
158 }
159
160 return 0;
161}
162
/*
 * Bring the big cluster's SCU interface back up: de-assert ACINACTM
 * and release the cluster's bus-idle request.
 */
void pmu_scu_b_pwrup(void)
{
	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);
}
168
/*
 * Power down the big cluster's SCU interface.
 *
 * Sequence: verify all big-cluster CPUs are already powered off, flush
 * the cluster L2, assert ACINACTM, wait for the L2 to reach WFI, then
 * idle the cluster's bus interface. Bails out with an error log if any
 * big-cluster CPU is still up.
 */
static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	/* All big-cluster CPU power domains must report "down" first. */
	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) {
		ERROR("%s: not all cpus is off\n", __func__);
		return;
	}

	rk3368_flash_l2_b();

	/* Assert ACINACTM so the cluster interface may go idle. */
	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);

	/* Wait for the big-cluster L2 to enter WFI, logging periodically. */
	while (!(mmio_read_32(PMU_BASE +
	       PMU_CORE_PWR_ST) & BIT(clst_b_l2_wfi))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);
}
192
193static void pmu_sleep_mode_config(void)
194{
195 uint32_t pwrmd_core, pwrmd_com;
196
197 pwrmd_core = BIT(pmu_mdcr_cpu0_pd) |
198 BIT(pmu_mdcr_scu_l_pd) |
199 BIT(pmu_mdcr_l2_flush) |
200 BIT(pmu_mdcr_l2_idle) |
201 BIT(pmu_mdcr_clr_clst_l) |
202 BIT(pmu_mdcr_clr_core) |
203 BIT(pmu_mdcr_clr_cci) |
204 BIT(pmu_mdcr_core_pd);
205
206 pwrmd_com = BIT(pmu_mode_en) |
207 BIT(pmu_mode_sref_enter) |
208 BIT(pmu_mode_pwr_off);
209
210 regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en);
211 regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en);
212 regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en);
213
214 mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2));
215 mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100));
216 mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2));
217 mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core);
218 mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com);
219 dsb();
220}
221
Tony Xief6118cc2016-01-15 17:17:32 +0800222static void pmu_set_sleep_mode(void)
223{
Tony Xief6118cc2016-01-15 17:17:32 +0800224 pmu_sleep_mode_config();
225 soc_sleep_config();
226 regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis);
227 regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b);
228 pmu_scu_b_pwrdn();
229 mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
Lin Huang30e43392017-05-04 16:02:45 +0800230 ((uintptr_t)&pmu_cpuson_entrypoint >>
231 CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);
Tony Xief6118cc2016-01-15 17:17:32 +0800232 mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
Lin Huang30e43392017-05-04 16:02:45 +0800233 ((uintptr_t)&pmu_cpuson_entrypoint >>
234 CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);
Tony Xief6118cc2016-01-15 17:17:32 +0800235}
236
237static int cpus_id_power_domain(uint32_t cluster,
238 uint32_t cpu,
239 uint32_t pd_state,
240 uint32_t wfie_msk)
241{
242 uint32_t pd;
243 uint64_t mpidr;
244
245 if (cluster)
246 pd = PD_CPUB0 + cpu;
247 else
248 pd = PD_CPUL0 + cpu;
249
250 if (pmu_power_domain_st(pd) == pd_state)
251 return 0;
252
253 if (pd_state == pmu_pd_off) {
254 mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu;
255 if (check_cpu_wfie(mpidr, wfie_msk))
256 return -EINVAL;
257 }
258
259 return pmu_power_domain_ctr(pd, pd_state);
260}
261
262static void nonboot_cpus_off(void)
263{
264 uint32_t boot_cpu, boot_cluster, cpu;
265
266 boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
267 boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());
268
269 /* turn off noboot cpus */
270 for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
271 if (!boot_cluster && (cpu == boot_cpu))
272 continue;
273 cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
274 }
275
276 for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
277 if (boot_cluster && (cpu == boot_cpu))
278 continue;
279 cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
280 }
281}
282
/* Save SRAM contents before suspend; not yet implemented for rk3368. */
void sram_save(void)
{
	/* TODO: support the sram save for rk3368 SoCs */
}
287
/* Restore SRAM contents after resume; not yet implemented for rk3368. */
void sram_restore(void)
{
	/* TODO: support the sram restore for rk3368 SoCs */
}
292
/*
 * Power on the core identified by mpidr and have it enter at entrypoint.
 *
 * Records the hotplug flag and entry point for the target core, briefly
 * redirects its cluster's boot address to PMU SRAM (the warm-boot stub),
 * powers the core on, then restores the cold-boot address.
 *
 * Returns 0; the power-domain transition result itself is not checked.
 */
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu, cluster;
	uint32_t cpuon_id;

	cpu = MPIDR_AFFLVL0_VAL(mpidr);
	cluster = MPIDR_AFFLVL1_VAL(mpidr);

	/* Make sure the cpu is off before powering it up. */
	cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);

	/* Linear core index across both clusters. */
	cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
	assert(cpuon_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpuon_id] == 0);
	cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpuon_id] = entrypoint;

	/* Switch boot addr to pmusram so the core runs the warm-boot stub. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	dsb();

	cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);

	/* Restore the cold-boot address for subsequent resets. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	return 0;
}
324
/*
 * Hook run on a core after it has been powered on; nothing further is
 * required on rk3368.
 */
int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	return 0;
}
329
tony.xie3ecb0212017-03-03 16:22:12 +0800330int rockchip_soc_sys_pwr_dm_resume(void)
Tony Xief6118cc2016-01-15 17:17:32 +0800331{
Caesar Wang59e41b52016-04-10 14:11:07 +0800332 mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
333 (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
334 CPU_BOOT_ADDR_WMASK);
335 mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
336 (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
337 CPU_BOOT_ADDR_WMASK);
Tony Xief6118cc2016-01-15 17:17:32 +0800338 pm_plls_resume();
339 pmu_scu_b_pwrup();
340
341 return 0;
342}
343
/*
 * System suspend path: power off all non-boot CPUs and arm the PMU's
 * deep-sleep configuration. Always returns 0.
 */
int rockchip_soc_sys_pwr_dm_suspend(void)
{
	nonboot_cpus_off();
	pmu_set_sleep_mode();

	return 0;
}
351
/* EL3 MMU setup hook; not yet implemented for rk3368. */
void rockchip_plat_mmu_el3(void)
{
	/* TODO: support the el3 for rk3368 SoCs */
}
356
/*
 * Platform PMU initialisation: record the warm-boot entry address,
 * clear the per-core power-on flags, and power off all non-boot CPUs.
 */
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	/*
	 * register requires 32bits mode, switch it to 32 bits
	 * NOTE(review): the value is truncated to 32 bits when stored in
	 * cpu_warm_boot_addr — assumes the warm-boot code sits below
	 * 4 GiB; confirm against the platform memory map.
	 */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	nonboot_cpus_off();
	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}