blob: f44e7cf9b3e00d11804c9f1c23b1ea90a72d1198 [file] [log] [blame]
Tony Xief6118cc2016-01-15 17:17:32 +08001/*
2 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
Antonio Nino Diaz493bf332016-12-14 14:31:32 +000014 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
Tony Xief6118cc2016-01-15 17:17:32 +080018 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch_helpers.h>
32#include <assert.h>
33#include <debug.h>
34#include <delay_timer.h>
35#include <errno.h>
36#include <mmio.h>
37#include <platform.h>
38#include <platform_def.h>
39#include <plat_private.h>
40#include <rk3368_def.h>
41#include <pmu_sram.h>
42#include <soc.h>
43#include <pmu.h>
44#include <ddr_rk3368.h>
45#include <pmu_com.h>
46
/* Serialises power-domain operations between CPUs — presumably taken by the
 * common Rockchip PMU code (pmu_com.h); verify against that header. */
DEFINE_BAKERY_LOCK(rockchip_pd_lock);

/*
 * Shared suspend/resume parameter block placed at PSRAM_DT_BASE (PMU SRAM)
 * so the on-SRAM warm-boot code can read it while DRAM is unavailable.
 */
static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;

/* Warm-boot entry address; kept 32-bit because it is written into the
 * 32-bit SGRF boot-address registers (see plat_rockchip_pmu_init). */
static uint32_t cpu_warm_boot_addr;
53
Tony Xief6118cc2016-01-15 17:17:32 +080054void rk3368_flash_l2_b(void)
55{
56 uint32_t wait_cnt = 0;
57
58 regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
59 dsb();
60
61 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)
62 & BIT(clst_b_l2_flsh_done))) {
63 wait_cnt++;
64 if (!(wait_cnt % MAX_WAIT_CONUT))
65 WARN("%s:reg %x,wait\n", __func__,
66 mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
67 }
68
69 regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
70}
71
72static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle)
73{
74 uint32_t mask = BIT(req);
75 uint32_t idle_mask = 0;
76 uint32_t idle_target = 0;
77 uint32_t val;
78 uint32_t wait_cnt = 0;
79
80 switch (req) {
81 case bus_ide_req_clst_l:
82 idle_mask = BIT(pmu_idle_ack_cluster_l);
83 idle_target = (idle << pmu_idle_ack_cluster_l);
84 break;
85
86 case bus_ide_req_clst_b:
87 idle_mask = BIT(pmu_idle_ack_cluster_b);
88 idle_target = (idle << pmu_idle_ack_cluster_b);
89 break;
90
91 case bus_ide_req_cxcs:
92 idle_mask = BIT(pmu_idle_ack_cxcs);
93 idle_target = ((!idle) << pmu_idle_ack_cxcs);
94 break;
95
96 case bus_ide_req_cci400:
97 idle_mask = BIT(pmu_idle_ack_cci400);
98 idle_target = ((!idle) << pmu_idle_ack_cci400);
99 break;
100
101 case bus_ide_req_gpu:
102 idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
103 idle_target = (idle << pmu_idle_ack_gpu) |
104 (idle << pmu_idle_gpu);
105 break;
106
107 case bus_ide_req_core:
108 idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
109 idle_target = (idle << pmu_idle_ack_core) |
110 (idle << pmu_idle_core);
111 break;
112
113 case bus_ide_req_bus:
114 idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
115 idle_target = (idle << pmu_idle_ack_bus) |
116 (idle << pmu_idle_bus);
117 break;
118 case bus_ide_req_dma:
119 idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
120 idle_target = (idle << pmu_idle_ack_dma) |
121 (idle << pmu_idle_dma);
122 break;
123
124 case bus_ide_req_peri:
125 idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
126 idle_target = (idle << pmu_idle_ack_peri) |
127 (idle << pmu_idle_peri);
128 break;
129
130 case bus_ide_req_video:
131 idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
132 idle_target = (idle << pmu_idle_ack_video) |
133 (idle << pmu_idle_video);
134 break;
135
136 case bus_ide_req_vio:
137 idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
138 idle_target = (pmu_idle_ack_vio) |
139 (idle << pmu_idle_vio);
140 break;
141
142 case bus_ide_req_alive:
143 idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
144 idle_target = (idle << pmu_idle_ack_alive) |
145 (idle << pmu_idle_alive);
146 break;
147
148 case bus_ide_req_pmu:
149 idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu);
150 idle_target = (idle << pmu_idle_ack_pmu) |
151 (idle << pmu_idle_pmu);
152 break;
153
154 case bus_ide_req_msch:
155 idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch);
156 idle_target = (idle << pmu_idle_ack_msch) |
157 (idle << pmu_idle_msch);
158 break;
159
160 case bus_ide_req_cci:
161 idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci);
162 idle_target = (idle << pmu_idle_ack_cci) |
163 (idle << pmu_idle_cci);
164 break;
165
166 default:
167 ERROR("%s: Unsupported the idle request\n", __func__);
168 break;
169 }
170
171 val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
172 if (idle)
173 val |= mask;
174 else
175 val &= ~mask;
176
177 mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);
178
179 while ((mmio_read_32(PMU_BASE +
180 PMU_BUS_IDE_ST) & idle_mask) != idle_target) {
181 wait_cnt++;
182 if (!(wait_cnt % MAX_WAIT_CONUT))
183 WARN("%s:st=%x(%x)\n", __func__,
184 mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
185 idle_mask);
186 }
187
188 return 0;
189}
190
/*
 * Bring the big cluster's bus interface back up after a power-down:
 * de-assert ACINACTM for cluster-b, then release its bus-idle request.
 * Counterpart of pmu_scu_b_pwrdn(); called from sys_pwr_domain_resume().
 */
void pmu_scu_b_pwrup(void)
{
	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);
}
196
/*
 * Prepare the big cluster for power-down.
 *
 * Sequence (order matters — each step gates the next piece of hardware
 * state): verify every cluster-b CPU is already off, flush the cluster's
 * L2, assert ACINACTM, wait for the cluster to report L2 WFI, then
 * request the cluster-b bus interface to go idle.
 */
static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	/* Refuse to proceed unless all big-cluster CPUs are powered down. */
	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) {
		ERROR("%s: not all cpus is off\n", __func__);
		return;
	}

	rk3368_flash_l2_b();

	/* Assert ACINACTM for cluster-b. */
	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);

	/* Wait for cluster-b L2 WFI; report periodically while stuck. */
	while (!(mmio_read_32(PMU_BASE +
		 PMU_CORE_PWR_ST) & BIT(clst_b_l2_wfi))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);
}
220
/*
 * Program the PMU power-mode registers for system suspend.
 *
 * pwrmd_core selects the CPU-partition actions taken on sleep entry
 * (core/SCU power-down, L2 flush/idle, clamp clearing); pwrmd_com enables
 * the common power mode with DDR self-refresh entry and power-off.
 * Wakeup is armed for both clusters and GPIO wakeup is disabled. The
 * *_CNT registers set PLL lock/reset and stabilisation delays counted in
 * 24 MHz cycles.
 */
static void pmu_sleep_mode_config(void)
{
	uint32_t pwrmd_core, pwrmd_com;

	pwrmd_core = BIT(pmu_mdcr_cpu0_pd) |
		     BIT(pmu_mdcr_scu_l_pd) |
		     BIT(pmu_mdcr_l2_flush) |
		     BIT(pmu_mdcr_l2_idle) |
		     BIT(pmu_mdcr_clr_clst_l) |
		     BIT(pmu_mdcr_clr_core) |
		     BIT(pmu_mdcr_clr_cci) |
		     BIT(pmu_mdcr_core_pd);

	pwrmd_com = BIT(pmu_mode_en) |
		    BIT(pmu_mode_sref_enter) |
		    BIT(pmu_mode_pwr_off);

	/* Either cluster may wake the system; GPIO wakeup stays disabled. */
	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en);
	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en);
	regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en);

	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100));
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core);
	mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com);
	/* Make sure all PMU writes have landed before continuing. */
	dsb();
}
249
/*
 * Save the DDR registers into the PMU SRAM data area so the on-SRAM
 * resume code can restore them after wakeup.
 * NOTE(review): the first argument to ddr_reg_save() looks like a
 * suspend-mode flag — confirm against ddr_rk3368.c.
 */
static void ddr_suspend_save(void)
{
	ddr_reg_save(1, psram_sleep_cfg->ddr_data);
}
254
/*
 * Arrange all hardware state needed to enter system sleep.
 *
 * Saves the DDR registers, programs the PMU sleep modes and SoC
 * clock/PLL suspend state, masks global interrupts at the PMU, powers
 * down the big cluster's SCU, and finally redirects both clusters' boot
 * addresses to PMU SRAM so that wakeup resumes through the on-SRAM code.
 */
static void pmu_set_sleep_mode(void)
{
	ddr_suspend_save();
	pmu_sleep_mode_config();
	soc_sleep_config();
	regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis);
	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b);
	pmu_scu_b_pwrdn();
	/* Warm boot for both clusters must come back through PMU SRAM. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
}
270
271void plat_rockchip_pmusram_prepare(void)
272{
273 uint32_t *sram_dst, *sram_src;
274 size_t sram_size = 2;
Soby Mathew940f41b2016-04-12 14:04:29 +0100275 uint32_t code_size;
Tony Xief6118cc2016-01-15 17:17:32 +0800276
277 /* pmu sram code and data prepare */
278 sram_dst = (uint32_t *)PMUSRAM_BASE;
279 sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
280 sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
281 (uint32_t *)sram_src;
282 u32_align_cpy(sram_dst, sram_src, sram_size);
283
284 /* ddr code */
285 sram_dst += sram_size;
286 sram_src = ddr_get_resume_code_base();
287 code_size = ddr_get_resume_code_size();
288 u32_align_cpy(sram_dst, sram_src, code_size / 4);
289 psram_sleep_cfg->ddr_func = (uint64_t)sram_dst;
290
291 /* ddr data */
292 sram_dst += (code_size / 4);
Tony Xief6118cc2016-01-15 17:17:32 +0800293 psram_sleep_cfg->ddr_data = (uint64_t)sram_dst;
294
Soby Mathew940f41b2016-04-12 14:04:29 +0100295 assert((uint64_t)(sram_dst + ddr_get_resume_data_size() / 4)
296 < PSRAM_SP_BOTTOM);
Tony Xief6118cc2016-01-15 17:17:32 +0800297 psram_sleep_cfg->sp = PSRAM_SP_TOP;
298}
299
300static int cpus_id_power_domain(uint32_t cluster,
301 uint32_t cpu,
302 uint32_t pd_state,
303 uint32_t wfie_msk)
304{
305 uint32_t pd;
306 uint64_t mpidr;
307
308 if (cluster)
309 pd = PD_CPUB0 + cpu;
310 else
311 pd = PD_CPUL0 + cpu;
312
313 if (pmu_power_domain_st(pd) == pd_state)
314 return 0;
315
316 if (pd_state == pmu_pd_off) {
317 mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu;
318 if (check_cpu_wfie(mpidr, wfie_msk))
319 return -EINVAL;
320 }
321
322 return pmu_power_domain_ctr(pd, pd_state);
323}
324
325static void nonboot_cpus_off(void)
326{
327 uint32_t boot_cpu, boot_cluster, cpu;
328
329 boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
330 boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());
331
332 /* turn off noboot cpus */
333 for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
334 if (!boot_cluster && (cpu == boot_cpu))
335 continue;
336 cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
337 }
338
339 for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
340 if (boot_cluster && (cpu == boot_cpu))
341 continue;
342 cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
343 }
344}
345
/*
 * Power on one secondary CPU and have it enter BL31 at 'entrypoint'.
 *
 * mpidr:      MPIDR of the CPU to bring up (Aff0 = cpu, Aff1 = cluster).
 * entrypoint: warm-boot entry recorded for the PMU SRAM boot code.
 *
 * The SGRF boot address is temporarily pointed at PMU SRAM while the CPU
 * is released, then restored to the cold-boot vector. Always returns 0;
 * intermediate cpus_id_power_domain() failures are not propagated.
 */
static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu, cluster;
	uint32_t cpuon_id;

	cpu = MPIDR_AFFLVL0_VAL(mpidr);
	cluster = MPIDR_AFFLVL1_VAL(mpidr);

	/* Make sure the cpu is off before powering it up. */
	cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);

	/* Linear CPU index used by the SRAM boot code's flag/entry arrays. */
	cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
	assert(cpuon_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpuon_id] == 0);
	cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpuon_id] = entrypoint;

	/* Switch boot addr to pmusram */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	dsb();

	cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);

	/* Restore the cold-boot address once the CPU has been released. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	return 0;
}
377
/*
 * Hook invoked on a freshly powered-on core once it re-enters BL31.
 * No platform-specific work is needed here on RK3368.
 */
static int cores_pwr_domain_on_finish(void)
{
	return 0;
}
382
/*
 * Undo the suspend-time configuration after wakeup: point both clusters'
 * boot addresses back at the cold-boot vector, restore the PLLs, and
 * power the big cluster's SCU/bus interface back up. Returns 0.
 */
static int sys_pwr_domain_resume(void)
{
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	pm_plls_resume();
	pmu_scu_b_pwrup();

	return 0;
}
396
/*
 * System suspend entry: turn off all non-boot CPUs, then program the
 * full sleep configuration. Returns 0.
 */
static int sys_pwr_domain_suspend(void)
{
	nonboot_cpus_off();
	pmu_set_sleep_mode();

	/* NOTE(review): ddr_flag is presumably consumed by the PMU SRAM
	 * resume code to pick its DDR restore path — confirm in pmu_sram. */
	psram_sleep_cfg->ddr_flag = 0;

	return 0;
}
406
/*
 * Power-management callbacks handed to the common Rockchip PSCI glue via
 * plat_setup_rockchip_pm_ops() in plat_rockchip_pmu_init().
 */
static struct rockchip_pm_ops_cb pm_ops = {
	.cores_pwr_dm_on = cores_pwr_domain_on,
	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
	.sys_pwr_dm_resume = sys_pwr_domain_resume,
	.sys_gbl_soft_reset = soc_sys_global_soft_reset,
};
414
/*
 * Platform PMU initialisation, run during BL31 setup.
 *
 * Registers the power-management callbacks, records the warm-boot entry
 * address (narrowed to 32 bits for the SGRF boot-address registers),
 * clears the per-CPU hotplug flags, stores the boot CPU's MPIDR for the
 * SRAM resume path, and powers down all non-boot CPUs.
 */
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	plat_setup_rockchip_pm_ops(&pm_ops);

	/* register requires 32bits mode, switch it to 32 bits */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	/* Keep only the low 16 MPIDR bits (Aff0/Aff1) to identify the
	 * boot CPU to the SRAM resume code. */
	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	nonboot_cpus_off();
	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}