blob: f173a164945baabfbf235757a5da8fb6277bf697 [file] [log] [blame]
/*
 * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
7#include <stdlib.h>
8#include <stdint.h>
9#include <stdbool.h>
10
11#include <common/debug.h>
12#include <drivers/delay_timer.h>
13#include <lib/mmio.h>
14#include <lib/psci/psci.h>
15#include <lib/smccc.h>
16#include <platform_def.h>
17#include <services/std_svc.h>
18
19#include <gpc.h>
20#include <imx_sip_svc.h>
21
/*
 * Clock-gating register offset for gate index x (16 bytes per gate entry).
 * NOTE(review): presumably a CCM CCGRn offset; unused in this file chunk —
 * confirm against the clock controller usage before removing.
 */
#define CCGR(x)		(0x4000 + (x) * 16)
/*
 * PU (peripheral unit) power-domain identifiers. Each value is used both as
 * an index into pu_domains[] and as a bit position in pu_domain_status, so
 * the ordering here must match the pu_domains[] table exactly.
 */
enum pu_domain_id {
	HSIOMIX,
	PCIE,
	OTG1,
	OTG2,
	GPUMIX,
	VPUMIX,
	VPU_G1,
	VPU_G2,
	VPU_H1,
	DISPMIX,
	MIPI,
	/* the two domains below are only for TF-A internal use */
	GPU2D,
	GPU3D,
	MAX_DOMAINS,
};
41
/*
 * PU domain descriptor table; entry order must match enum pu_domain_id,
 * since imx_gpc_pm_domain_enable() indexes it by domain_id.
 * NOTE(review): the boolean macro argument is true only for OTG1/OTG2 —
 * presumably an "always on" / USB-PHY flag; confirm against the
 * IMX_PD_DOMAIN/IMX_MIX_DOMAIN definitions in gpc.h.
 */
static struct imx_pwr_domain pu_domains[] = {
	IMX_MIX_DOMAIN(HSIOMIX, false),
	IMX_PD_DOMAIN(PCIE, false),
	IMX_PD_DOMAIN(OTG1, true),
	IMX_PD_DOMAIN(OTG2, true),
	IMX_MIX_DOMAIN(GPUMIX, false),
	IMX_MIX_DOMAIN(VPUMIX, false),
	IMX_PD_DOMAIN(VPU_G1, false),
	IMX_PD_DOMAIN(VPU_G2, false),
	IMX_PD_DOMAIN(VPU_H1, false),
	IMX_MIX_DOMAIN(DISPMIX, false),
	IMX_PD_DOMAIN(MIPI, false),
	/* the two domains below are only for TF-A internal use */
	IMX_MIX_DOMAIN(GPU2D, false),
	IMX_MIX_DOMAIN(GPU3D, false),
};
59
/* Bitmask of currently powered-on PU domains, one bit per pu_domain_id. */
static unsigned int pu_domain_status;

/* SRC reset-control register offsets for the GPU and VPU mixes */
#define GPU_RCR			0x40
#define VPU_RCR			0x44

/* VPU blk-ctrl: soft-reset register and per-core reset-release (RSTN) bits */
#define VPU_CTL_BASE		0x38330000
#define BLK_SFT_RSTN_CSR	0x0
#define H1_SFT_RSTN		BIT(2)
#define G1_SFT_RSTN		BIT(1)
#define G2_SFT_RSTN		BIT(0)

/* Display blk-ctrl base, written during DISPMIX power-up */
#define DISP_CTL_BASE		0x32e28000
72
73void vpu_sft_reset_assert(uint32_t domain_id)
74{
75 uint32_t val;
76
77 val = mmio_read_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR);
78
79 switch (domain_id) {
80 case VPU_G1:
81 val &= ~G1_SFT_RSTN;
82 mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
83 break;
84 case VPU_G2:
85 val &= ~G2_SFT_RSTN;
86 mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
87 break;
88 case VPU_H1:
89 val &= ~H1_SFT_RSTN;
90 mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
91 break;
92 default:
93 break;
94 }
95}
96
97void vpu_sft_reset_deassert(uint32_t domain_id)
98{
99 uint32_t val;
100
101 val = mmio_read_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR);
102
103 switch (domain_id) {
104 case VPU_G1:
105 val |= G1_SFT_RSTN;
106 mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
107 break;
108 case VPU_G2:
109 val |= G2_SFT_RSTN;
110 mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
111 break;
112 case VPU_H1:
113 val |= H1_SFT_RSTN;
114 mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
115 break;
116 default:
117 break;
118 }
119}
120
/*
 * Power a PU (peripheral unit) domain up or down through the GPC.
 *
 * domain_id: index into pu_domains[] (one of enum pu_domain_id); out-of-range
 *            values are silently ignored.
 * on:        true to power the domain up, false to power it down.
 *
 * The MMIO sequencing below is hardware-mandated (PGC bit, up/down trigger,
 * poll-until-self-clear, then bus-bridge handshake); do not reorder.
 */
void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on)
{
	if (domain_id >= MAX_DOMAINS) {
		return;
	}

	struct imx_pwr_domain *pwr_domain = &pu_domains[domain_id];

	if (on) {
		/* record the domain as on (bit position == domain_id) */
		pu_domain_status |= (1 << domain_id);

		/* hold VPU cores in soft reset while their power comes up */
		if (domain_id == VPU_G1 || domain_id == VPU_G2 ||
		    domain_id == VPU_H1) {
			vpu_sft_reset_assert(domain_id);
		}

		/* HSIOMIX has no PU bit, so skip for it */
		if (domain_id != HSIOMIX) {
			/* clear the PGC bit */
			mmio_clrbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1);

			/* power up the domain */
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, pwr_domain->pwr_req);

			/* wait for power request done (trigger bit self-clears) */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & pwr_domain->pwr_req) {
				;
			}
		}

		if (domain_id == VPU_G1 || domain_id == VPU_G2 ||
		    domain_id == VPU_H1) {
			vpu_sft_reset_deassert(domain_id);
			/* delay for a while to make sure reset is done */
			udelay(100);
		}

		/*
		 * GPUMIX power-up also brings up the internal-only GPU2D/GPU3D
		 * sub-domains while the whole mix is held in reset via SRC.
		 */
		if (domain_id == GPUMIX) {
			/* assert reset */
			mmio_write_32(IMX_SRC_BASE + GPU_RCR, 0x1);

			/* power up GPU2D */
			mmio_clrbits_32(IMX_GPC_BASE + GPU2D_PGC, 0x1);

			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, GPU2D_PWR_REQ);

			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & GPU2D_PWR_REQ) {
				;
			}

			udelay(1);

			/* power up GPU3D */
			mmio_clrbits_32(IMX_GPC_BASE + GPU3D_PGC, 0x1);

			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, GPU3D_PWR_REQ);

			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & GPU3D_PWR_REQ) {
				;
			}

			udelay(10);
			/* release the gpumix reset */
			mmio_write_32(IMX_SRC_BASE + GPU_RCR, 0x0);
			udelay(10);
		}

		/* vpu sft clock enable: pulse the VPU reset, then gate clocks on */
		if (domain_id == VPUMIX) {
			mmio_write_32(IMX_SRC_BASE + VPU_RCR, 0x1);
			udelay(5);
			mmio_write_32(IMX_SRC_BASE + VPU_RCR, 0x0);
			udelay(5);

			/* enable all clocks in the VPU blk-ctrl */
			mmio_write_32(VPU_CTL_BASE + 0x4, 0x7);
		}

		if (domain_id == DISPMIX) {
			/*
			 * special setting for DISPMIX
			 * NOTE(review): magic blk-ctrl values — presumably
			 * clock-enable/reset-release; confirm against the
			 * display blk-ctrl register layout.
			 */
			mmio_write_32(DISP_CTL_BASE + 0x4, 0x1fff);
			mmio_write_32(DISP_CTL_BASE, 0x7f);
			mmio_write_32(DISP_CTL_BASE + 0x8, 0x30000);
		}

		/* handle the ADB400 bus-bridge sync */
		if (pwr_domain->need_sync) {
			/* clear adb power down request */
			mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync);

			/* wait for adb power request ack */
			while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) {
				;
			}
		}

		if (domain_id == GPUMIX) {
			/* power up GPU2D ADB */
			mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU2D_ADB400_SYNC);

			/* wait for adb power request ack */
			while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU2D_ADB400_ACK)) {
				;
			}

			/* power up GPU3D ADB */
			mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU3D_ADB400_SYNC);

			/* wait for adb power request ack */
			while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU3D_ADB400_ACK)) {
				;
			}
		}
	} else {
		/* record the domain as off */
		pu_domain_status &= ~(1 << domain_id);

		/*
		 * OTG1/OTG2 are never powered down here.
		 * NOTE(review): presumably because the USB PHY domains must
		 * stay on (they are the only entries flagged true in
		 * pu_domains[]) — confirm against gpc.h.
		 */
		if (domain_id == OTG1 || domain_id == OTG2) {
			return;
		}

		/* GPU2D & GPU3D ADB power down (reverse of power-up order) */
		if (domain_id == GPUMIX) {
			mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU2D_ADB400_SYNC);

			/* wait for adb power request ack */
			while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU2D_ADB400_ACK)) {
				;
			}

			mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU3D_ADB400_SYNC);

			/* wait for adb power request ack */
			while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU3D_ADB400_ACK)) {
				;
			}
		}

		/* handle the ADB400 sync */
		if (pwr_domain->need_sync) {
			/* set adb power down request */
			mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync);

			/* wait for adb power request ack */
			while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) {
				;
			}
		}

		if (domain_id == GPUMIX) {
			/* power down GPU2D */
			mmio_setbits_32(IMX_GPC_BASE + GPU2D_PGC, 0x1);

			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, GPU2D_PWR_REQ);

			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & GPU2D_PWR_REQ) {
				;
			}

			/* power down GPU3D */
			mmio_setbits_32(IMX_GPC_BASE + GPU3D_PGC, 0x1);

			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, GPU3D_PWR_REQ);

			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & GPU3D_PWR_REQ) {
				;
			}
		}

		/* HSIOMIX has no PU bit, so skip for it */
		if (domain_id != HSIOMIX) {
			/* set the PGC bit */
			mmio_setbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1);

			/* power down the domain */
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, pwr_domain->pwr_req);

			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & pwr_domain->pwr_req) {
				;
			}
		}
	}
}
308
/*
 * One-time GPC initialization at boot: mask all wakeup IRQs, configure the
 * A53 low-power-mode behavior, core/SCU power-up timing, dummy handshake
 * ACKs, and SLPCR defaults, then release the USB PHY resets in SRC.
 * Register write order follows the hardware bring-up sequence; do not
 * reorder.
 */
void imx_gpc_init(void)
{
	unsigned int val;
	int i;

	/* mask all the wakeup irq by default (4 IMR words per core) */
	for (i = 0; i < 4; i++) {
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_A53 + i * 4, ~0x0);
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE1_A53 + i * 4, ~0x0);
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE2_A53 + i * 4, ~0x0);
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE3_A53 + i * 4, ~0x0);
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_M4 + i * 4, ~0x0);
	}

	val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
	/*
	 * use GIC wake_request to wakeup C0~C3 from LPM
	 * NOTE(review): 0x30c00000 is a magic bit pattern — confirm the field
	 * names against the LPCR_A53_BSC register description.
	 */
	val |= 0x30c00000;
	/* clear the MASTER0 LPM handshake */
	val &= ~(1 << 6);
	mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

	/* clear MASTER1 & MASTER2 mapping in CPU0(A53) */
	mmio_clrbits_32(IMX_GPC_BASE + MST_CPU_MAPPING, (MASTER1_MAPPING |
		MASTER2_MAPPING));

	/* set all mix/PU in A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xffff);

	/*
	 * Set the CORE & SCU power up timing:
	 * SW = 0x1, SW2ISO = 0x1;
	 * the CPU CORE and SCU power up timing counter
	 * is driven by the 32K OSC, each domain's power up
	 * latency is (SW + SW2ISO) / 32768
	 */
	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(0) + 0x4, 0x81);
	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(1) + 0x4, 0x81);
	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(2) + 0x4, 0x81);
	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(3) + 0x4, 0x81);
	mmio_write_32(IMX_GPC_BASE + PLAT_PGC_PCR + 0x4, 0x81);
	mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING,
		      (0x59 << 10) | 0x5B | (0x2 << 20));

	/* set DUMMY PDN/PUP ACK by default for A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53,
		      A53_DUMMY_PUP_ACK | A53_DUMMY_PDN_ACK);

	/* clear DSM by default */
	val = mmio_read_32(IMX_GPC_BASE + SLPCR);
	val &= ~SLPCR_EN_DSM;
	/* enable the fast wakeup wait mode */
	val |= SLPCR_A53_FASTWUP_WAIT_MODE;
	/* clear the RBC (6-bit count field) */
	val &= ~(0x3f << SLPCR_RBC_COUNT_SHIFT);
	/* set the STBY_COUNT to 0x5, (128 * 30)us */
	val &= ~(0x7 << SLPCR_STBY_COUNT_SHFT);
	val |= (0x5 << SLPCR_STBY_COUNT_SHFT);
	mmio_write_32(IMX_GPC_BASE + SLPCR, val);

	/*
	 * USB PHY power up needs to make sure RESET bit in SRC is clear,
	 * otherwise, the PU power up bit in GPC will NOT self-cleared.
	 * only need to do it once.
	 */
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1);
}
Jacky Baia6177002019-03-06 17:15:06 +0800375}