/*
 * sun50i H6 platform dram controller init
 *
 * (C) Copyright 2017 Icenowy Zheng <icenowy@aosc.io>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/dram.h>
#include <asm/arch/cpu.h>
#include <linux/bitops.h>
#include <linux/kconfig.h>

/*
 * The DRAM controller structure on H6 is similar to the ones on A23/A80:
 * they all contain 3 parts, COM, CTL and PHY. (As a note, on A33/A83T/H3/
 * A64/H5/R40 the CTL and PHY are combined.)
 *
 * COM is Allwinner-specific. On H6, the address mapping function is moved
 * from COM to CTL (using the standard ADDRMAP registers of the DesignWare
 * memory controller).
 *
 * CTL (controller) and PHY are from DesignWare.
 *
 * The CTL part is somewhat similar to the one on A23/A80 (because they all
 * originate from DesignWare), but with more registers added.
 *
 * The PHY part is quite new and has not been seen in any previous Allwinner
 * SoC, nor in any other SoC supported by U-Boot. The only other SoC known
 * to have a similar PHY is the ZynqMP.
 */

static void mctl_sys_init(struct dram_para *para);
static void mctl_com_init(struct dram_para *para);
static void mctl_channel_init(struct dram_para *para);

static void mctl_core_init(struct dram_para *para)
{
	mctl_sys_init(para);
	mctl_com_init(para);
	switch (para->type) {
	case SUNXI_DRAM_TYPE_LPDDR3:
	case SUNXI_DRAM_TYPE_DDR3:
		mctl_set_timing_params(para);
		break;
	default:
		panic("Unsupported DRAM type!");
	}
	mctl_channel_init(para);
}

/* PHY initialisation */
static void mctl_phy_pir_init(u32 val)
{
	struct sunxi_mctl_phy_reg * const mctl_phy =
		(struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;

	writel(val, &mctl_phy->pir);
	writel(val | BIT(0), &mctl_phy->pir);	/* Start initialisation. */
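	/* Wait for the PHY to signal that initialisation has completed. */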
	mctl_await_completion(&mctl_phy->pgsr[0], BIT(0), BIT(0));
}

enum {
	MBUS_PORT_CPU = 0,
	MBUS_PORT_GPU = 1,
	MBUS_PORT_MAHB = 2,
	MBUS_PORT_DMA = 3,
	MBUS_PORT_VE = 4,
	MBUS_PORT_CE = 5,
	MBUS_PORT_TSC0 = 6,
	MBUS_PORT_NDFC0 = 8,
	MBUS_PORT_CSI0 = 11,
	MBUS_PORT_DI0 = 14,
	MBUS_PORT_DI1 = 15,
	MBUS_PORT_DE300 = 16,
	MBUS_PORT_IOMMU = 25,
	MBUS_PORT_VE2 = 26,
	MBUS_PORT_USB3 = 37,
	MBUS_PORT_PCIE = 38,
	MBUS_PORT_VP9 = 39,
	MBUS_PORT_HDCP2 = 40,
};

enum {
	MBUS_QOS_LOWEST = 0,
	MBUS_QOS_LOW,
	MBUS_QOS_HIGH,
	MBUS_QOS_HIGHEST
};

inline void mbus_configure_port(u8 port,
				bool bwlimit,
				bool priority,
				u8 qos,
				u8 waittime,
				u8 acs,
				u16 bwl0,
				u16 bwl1,
				u16 bwl2)
{
	struct sunxi_mctl_com_reg * const mctl_com =
		(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;

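	/*
	 * Register layout as used below: cfg0 bit 0 enables the bandwidth
	 * limiter, bit 1 sets the priority flag, bits [3:2] hold the QoS
	 * level, bits [7:4] the wait time, bits [15:8] the ACS value and
	 * bits [31:16] the first bandwidth limit; cfg1 packs the two
	 * remaining bandwidth limits.
	 */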
	const u32 cfg0 = ( (bwlimit ? (1 << 0) : 0)
			   | (priority ? (1 << 1) : 0)
			   | ((qos & 0x3) << 2)
			   | ((waittime & 0xf) << 4)
			   | ((acs & 0xff) << 8)
			   | (bwl0 << 16) );
	const u32 cfg1 = ((u32)bwl2 << 16) | (bwl1 & 0xffff);

	debug("MBUS port %d cfg0 %08x cfg1 %08x\n", port, cfg0, cfg1);
	writel(cfg0, &mctl_com->master[port].cfg0);
	writel(cfg1, &mctl_com->master[port].cfg1);
}

#define MBUS_CONF(port, bwlimit, qos, acs, bwl0, bwl1, bwl2)	\
	mbus_configure_port(MBUS_PORT_ ## port, bwlimit, false,	\
			    MBUS_QOS_ ## qos, 0, acs, bwl0, bwl1, bwl2)

static void mctl_set_master_priority(void)
{
	struct sunxi_mctl_com_reg * const mctl_com =
		(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;

	/* enable bandwidth limit windows and set windows size 1us */
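	/* With a presumed 400 MHz MBUS clock, 399 + 1 cycles make up 1 us. */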
	writel(399, &mctl_com->tmr);
	writel(BIT(16), &mctl_com->bwcr);

	MBUS_CONF(  CPU,  true, HIGHEST, 0,  256,  128,  100);
	MBUS_CONF(  GPU,  true,    HIGH, 0, 1536, 1400,  256);
	MBUS_CONF( MAHB,  true, HIGHEST, 0,  512,  256,   96);
	MBUS_CONF(  DMA,  true,    HIGH, 0,  256,  100,   80);
	MBUS_CONF(   VE,  true,    HIGH, 2, 8192, 5500, 5000);
	MBUS_CONF(   CE,  true,    HIGH, 2,  100,   64,   32);
	MBUS_CONF( TSC0,  true,    HIGH, 2,  100,   64,   32);
	MBUS_CONF(NDFC0,  true,    HIGH, 0,  256,  128,   64);
	MBUS_CONF( CSI0,  true,    HIGH, 0,  256,  128,  100);
	MBUS_CONF(  DI0,  true,    HIGH, 0, 1024,  256,   64);
	MBUS_CONF(DE300,  true, HIGHEST, 6, 8192, 2800, 2400);
	MBUS_CONF(IOMMU,  true, HIGHEST, 0,  100,   64,   32);
	MBUS_CONF(  VE2,  true,    HIGH, 2, 8192, 5500, 5000);
	MBUS_CONF( USB3,  true,    HIGH, 0,  256,  128,   64);
	MBUS_CONF( PCIE,  true,    HIGH, 2,  100,   64,   32);
	MBUS_CONF(  VP9,  true,    HIGH, 2, 8192, 5500, 5000);
	MBUS_CONF(HDCP2,  true,    HIGH, 2,  100,   64,   32);
}

static void mctl_sys_init(struct dram_para *para)
{
	struct sunxi_ccm_reg * const ccm =
		(struct sunxi_ccm_reg *)SUNXI_CCM_BASE;
	struct sunxi_mctl_com_reg * const mctl_com =
		(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
	struct sunxi_mctl_ctl_reg * const mctl_ctl =
		(struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;

	/* Put all DRAM-related blocks to reset state */
	clrbits_le32(&ccm->mbus_cfg, MBUS_ENABLE | MBUS_RESET);
	clrbits_le32(&ccm->dram_gate_reset, BIT(0));
	udelay(5);
	writel(0, &ccm->dram_gate_reset);
	clrbits_le32(&ccm->pll5_cfg, CCM_PLL5_CTRL_EN);
	clrbits_le32(&ccm->dram_clk_cfg, DRAM_MOD_RESET);

	udelay(5);

	/* Set PLL5 rate to doubled DRAM clock rate */
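	/*
	 * Assuming the usual Allwinner PLL layout, the output is the 24 MHz
	 * oscillator multiplied by N + 1, so N = clk * 2 / 24 - 1 gives
	 * twice the DRAM clock (clk is given in MHz).
	 */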
	writel(CCM_PLL5_CTRL_EN | CCM_PLL5_LOCK_EN |
	       CCM_PLL5_CTRL_N(para->clk * 2 / 24 - 1), &ccm->pll5_cfg);
	mctl_await_completion(&ccm->pll5_cfg, CCM_PLL5_LOCK, CCM_PLL5_LOCK);

	/* Configure DRAM mod clock */
	writel(DRAM_CLK_SRC_PLL5, &ccm->dram_clk_cfg);
	setbits_le32(&ccm->dram_clk_cfg, DRAM_CLK_UPDATE);
	writel(BIT(RESET_SHIFT), &ccm->dram_gate_reset);
	udelay(5);
	setbits_le32(&ccm->dram_gate_reset, BIT(0));

	/* Disable all channels */
	writel(0, &mctl_com->maer0);
	writel(0, &mctl_com->maer1);
	writel(0, &mctl_com->maer2);

	/* Configure MBUS and enable DRAM mod reset */
	setbits_le32(&ccm->mbus_cfg, MBUS_RESET);
	setbits_le32(&ccm->mbus_cfg, MBUS_ENABLE);
	setbits_le32(&ccm->dram_clk_cfg, DRAM_MOD_RESET);
	udelay(5);

	/* Unknown hack from the BSP, which enables access of mctl_ctl regs */
	writel(0x8000, &mctl_ctl->unk_0x00c);
}

static void mctl_set_addrmap(struct dram_para *para)
{
	struct sunxi_mctl_ctl_reg * const mctl_ctl =
		(struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;
	u8 cols = para->cols;
	u8 rows = para->rows;
	u8 ranks = para->ranks;

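	/*
	 * The values written below follow what appears to be the DesignWare
	 * ADDRMAP convention: each byte-sized field is an offset added to
	 * the field's base address bit, and an all-ones field (0x1F, 0x0F
	 * or 0x3F, depending on field width) marks that address bit as
	 * unused.
	 */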
	/* Ranks */
	if (ranks == 2)
		mctl_ctl->addrmap[0] = rows + cols - 3;
	else
		mctl_ctl->addrmap[0] = 0x1F;

	/* Banks, hardcoded to 8 banks now */
	mctl_ctl->addrmap[1] = (cols - 2) | (cols - 2) << 8 | (cols - 2) << 16;

	/* Columns */
	mctl_ctl->addrmap[2] = 0;
	switch (cols) {
	case 8:
		mctl_ctl->addrmap[3] = 0x1F1F0000;
		mctl_ctl->addrmap[4] = 0x1F1F;
		break;
	case 9:
		mctl_ctl->addrmap[3] = 0x1F000000;
		mctl_ctl->addrmap[4] = 0x1F1F;
		break;
	case 10:
		mctl_ctl->addrmap[3] = 0;
		mctl_ctl->addrmap[4] = 0x1F1F;
		break;
	case 11:
		mctl_ctl->addrmap[3] = 0;
		mctl_ctl->addrmap[4] = 0x1F00;
		break;
	case 12:
		mctl_ctl->addrmap[3] = 0;
		mctl_ctl->addrmap[4] = 0;
		break;
	default:
		panic("Unsupported DRAM configuration: column number invalid\n");
	}

	/* Rows */
	mctl_ctl->addrmap[5] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | ((cols - 3) << 24);
	switch (rows) {
	case 13:
		mctl_ctl->addrmap[6] = (cols - 3) | 0x0F0F0F00;
		mctl_ctl->addrmap[7] = 0x0F0F;
		break;
	case 14:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | 0x0F0F0000;
		mctl_ctl->addrmap[7] = 0x0F0F;
		break;
	case 15:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | 0x0F000000;
		mctl_ctl->addrmap[7] = 0x0F0F;
		break;
	case 16:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | ((cols - 3) << 24);
		mctl_ctl->addrmap[7] = 0x0F0F;
		break;
	case 17:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | ((cols - 3) << 24);
		mctl_ctl->addrmap[7] = (cols - 3) | 0x0F00;
		break;
	case 18:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | ((cols - 3) << 24);
		mctl_ctl->addrmap[7] = (cols - 3) | ((cols - 3) << 8);
		break;
	default:
		panic("Unsupported DRAM configuration: row number invalid\n");
	}

	/* Bank groups, DDR4 only */
	mctl_ctl->addrmap[8] = 0x3F3F;
}

static void mctl_com_init(struct dram_para *para)
{
	struct sunxi_mctl_com_reg * const mctl_com =
		(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
	struct sunxi_mctl_ctl_reg * const mctl_ctl =
		(struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;
	struct sunxi_mctl_phy_reg * const mctl_phy =
		(struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;
	u32 reg_val, tmp;

	mctl_set_addrmap(para);

	setbits_le32(&mctl_com->cr, BIT(31));

	/* The bonding ID seems to be always 7. */
	if (readl(SUNXI_SIDC_BASE + 0x100) == 7)	/* bonding ID */
		clrbits_le32(&mctl_com->cr, BIT(27));
	else if (readl(SUNXI_SIDC_BASE + 0x100) == 3)
		setbits_le32(&mctl_com->cr, BIT(27));

	if (para->clk > 408)
		reg_val = 0xf00;
	else if (para->clk > 246)
		reg_val = 0x1f00;
	else
		reg_val = 0x3f00;
	clrsetbits_le32(&mctl_com->unk_0x008, 0x3f00, reg_val);

	/* TODO: half DQ, DDR4 */
	reg_val = MSTR_BUSWIDTH_FULL | MSTR_BURST_LENGTH(8) |
		  MSTR_ACTIVE_RANKS(para->ranks);
	if (para->type == SUNXI_DRAM_TYPE_LPDDR3)
		reg_val |= MSTR_DEVICETYPE_LPDDR3;
	if (para->type == SUNXI_DRAM_TYPE_DDR3)
		reg_val |= MSTR_DEVICETYPE_DDR3 | MSTR_2TMODE;
	writel(reg_val | BIT(31), &mctl_ctl->mstr);

	if (para->type == SUNXI_DRAM_TYPE_LPDDR3)
		reg_val = DCR_LPDDR3 | DCR_DDR8BANK;
	if (para->type == SUNXI_DRAM_TYPE_DDR3)
		reg_val = DCR_DDR3 | DCR_DDR8BANK | DCR_DDR2T;
	writel(reg_val | 0x400, &mctl_phy->dcr);

	if (para->ranks == 2)
		writel(0x0303, &mctl_ctl->odtmap);
	else
		writel(0x0201, &mctl_ctl->odtmap);

	/* TODO: DDR4 */
	if (para->type == SUNXI_DRAM_TYPE_LPDDR3) {
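		/*
		 * clk is in MHz, so clk * 7 / 2000 is roughly 3.5 ns
		 * expressed in DRAM clock cycles.
		 */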
		tmp = para->clk * 7 / 2000;
		reg_val = 0x0400;
		reg_val |= (tmp + 7) << 24;
		reg_val |= (((para->clk < 400) ? 3 : 4) - tmp) << 16;
	} else if (para->type == SUNXI_DRAM_TYPE_DDR3) {
		reg_val = 0x06000400;	/* TODO?: Use CL - CWL value in [7:0] */
	} else {
		panic("Only (LP)DDR3 supported (type = %d)\n", para->type);
	}
	writel(reg_val, &mctl_ctl->odtcfg);

	/* TODO: half DQ */
}

static void mctl_bit_delay_set(struct dram_para *para)
{
	struct sunxi_mctl_phy_reg * const mctl_phy =
		(struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;
	int i, j;
	u32 val;

	for (i = 0; i < 4; i++) {
		val = readl(&mctl_phy->dx[i].bdlr0);
		for (j = 0; j < 4; j++)
			val += para->dx_write_delays[i][j] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr0);

		val = readl(&mctl_phy->dx[i].bdlr1);
		for (j = 0; j < 4; j++)
			val += para->dx_write_delays[i][j + 4] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr1);

		val = readl(&mctl_phy->dx[i].bdlr2);
		for (j = 0; j < 4; j++)
			val += para->dx_write_delays[i][j + 8] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr2);
	}
	clrbits_le32(&mctl_phy->pgcr[0], BIT(26));

	for (i = 0; i < 4; i++) {
		val = readl(&mctl_phy->dx[i].bdlr3);
		for (j = 0; j < 4; j++)
			val += para->dx_read_delays[i][j] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr3);

		val = readl(&mctl_phy->dx[i].bdlr4);
		for (j = 0; j < 4; j++)
			val += para->dx_read_delays[i][j + 4] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr4);

		val = readl(&mctl_phy->dx[i].bdlr5);
		for (j = 0; j < 4; j++)
			val += para->dx_read_delays[i][j + 8] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr5);

		val = readl(&mctl_phy->dx[i].bdlr6);
		val += (para->dx_read_delays[i][12] << 8) |
		       (para->dx_read_delays[i][13] << 16);
		writel(val, &mctl_phy->dx[i].bdlr6);
	}
	setbits_le32(&mctl_phy->pgcr[0], BIT(26));
	udelay(1);

	if (para->type != SUNXI_DRAM_TYPE_LPDDR3)
		return;

	for (i = 1; i < 14; i++) {
		val = readl(&mctl_phy->acbdlr[i]);
		val += 0x0a0a0a0a;
		writel(val, &mctl_phy->acbdlr[i]);
	}
}

static void mctl_channel_init(struct dram_para *para)
{
	struct sunxi_mctl_com_reg * const mctl_com =
		(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
	struct sunxi_mctl_ctl_reg * const mctl_ctl =
		(struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;
	struct sunxi_mctl_phy_reg * const mctl_phy =
		(struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;
	int i;
	u32 val;

	setbits_le32(&mctl_ctl->dfiupd[0], BIT(31) | BIT(30));
	setbits_le32(&mctl_ctl->zqctl[0], BIT(31) | BIT(30));
	writel(0x2f05, &mctl_ctl->sched[0]);
	setbits_le32(&mctl_ctl->rfshctl3, BIT(0));
	setbits_le32(&mctl_ctl->dfimisc, BIT(0));
	setbits_le32(&mctl_ctl->unk_0x00c, BIT(8));
	clrsetbits_le32(&mctl_phy->pgcr[1], 0x180, 0xc0);
	/* TODO: non-LPDDR3 types */
	clrsetbits_le32(&mctl_phy->pgcr[2], GENMASK(17, 0), ns_to_t(7800));
	clrbits_le32(&mctl_phy->pgcr[6], BIT(0));
	clrsetbits_le32(&mctl_phy->dxccr, 0xee0, 0x220);
	/* TODO: VT compensation */
	clrsetbits_le32(&mctl_phy->dsgcr, BIT(0), 0x440060);
	clrbits_le32(&mctl_phy->vtcr[1], BIT(1));

	for (i = 0; i < 4; i++)
		clrsetbits_le32(&mctl_phy->dx[i].gcr[0], 0xe00, 0x800);
	for (i = 0; i < 4; i++)
		clrsetbits_le32(&mctl_phy->dx[i].gcr[2], 0xffff, 0x5555);
	for (i = 0; i < 4; i++)
		clrsetbits_le32(&mctl_phy->dx[i].gcr[3], 0x3030, 0x1010);

	udelay(100);

	if (para->ranks == 2)
		setbits_le32(&mctl_phy->dtcr[1], 0x30000);
	else
		clrsetbits_le32(&mctl_phy->dtcr[1], 0x30000, 0x10000);

	if (sunxi_dram_is_lpddr(para->type))
		clrbits_le32(&mctl_phy->dtcr[1], BIT(1));
	if (para->ranks == 2) {
		writel(0x00010001, &mctl_phy->rankidr);
		writel(0x20000, &mctl_phy->odtcr);
	} else {
		writel(0x0, &mctl_phy->rankidr);
		writel(0x10000, &mctl_phy->odtcr);
	}

	/* set bits [3:0] to 1? 0 not valid in ZynqMP d/s */
	if (para->type == SUNXI_DRAM_TYPE_LPDDR3)
		clrsetbits_le32(&mctl_phy->dtcr[0], 0xF0000000, 0x10000040);
	else
		clrsetbits_le32(&mctl_phy->dtcr[0], 0xF0000000, 0x10000000);
	if (para->clk <= 792) {
		if (para->clk <= 672) {
			if (para->clk <= 600)
				val = 0x300;
			else
				val = 0x400;
		} else {
			val = 0x500;
		}
	} else {
		val = 0x600;
	}
	/* FIXME: NOT REVIEWED YET */
	clrsetbits_le32(&mctl_phy->zq[0].zqcr, 0x700, val);
	clrsetbits_le32(&mctl_phy->zq[0].zqpr[0], 0xff,
			CONFIG_DRAM_ZQ & 0xff);
	clrbits_le32(&mctl_phy->zq[0].zqor[0], 0xfffff);
	setbits_le32(&mctl_phy->zq[0].zqor[0], (CONFIG_DRAM_ZQ >> 8) & 0xff);
	setbits_le32(&mctl_phy->zq[0].zqor[0], (CONFIG_DRAM_ZQ & 0xf00) - 0x100);
	setbits_le32(&mctl_phy->zq[0].zqor[0], (CONFIG_DRAM_ZQ & 0xff00) << 4);
	clrbits_le32(&mctl_phy->zq[1].zqpr[0], 0xfffff);
	setbits_le32(&mctl_phy->zq[1].zqpr[0], (CONFIG_DRAM_ZQ >> 16) & 0xff);
	setbits_le32(&mctl_phy->zq[1].zqpr[0], ((CONFIG_DRAM_ZQ >> 8) & 0xf00) - 0x100);
	setbits_le32(&mctl_phy->zq[1].zqpr[0], (CONFIG_DRAM_ZQ & 0xff0000) >> 4);
	if (para->type == SUNXI_DRAM_TYPE_LPDDR3) {
		for (i = 1; i < 14; i++)
			writel(0x06060606, &mctl_phy->acbdlr[i]);
	}

	val = PIR_ZCAL | PIR_DCAL | PIR_PHYRST | PIR_DRAMINIT | PIR_QSGATE |
	      PIR_RDDSKW | PIR_WRDSKW | PIR_RDEYE | PIR_WREYE;
	if (para->type == SUNXI_DRAM_TYPE_DDR3)
		val |= PIR_DRAMRST | PIR_WL;
	mctl_phy_pir_init(val);

	/* TODO: DDR4 types ? */
	for (i = 0; i < 4; i++)
		writel(0x00000909, &mctl_phy->dx[i].gcr[5]);

	for (i = 0; i < 4; i++) {
		if (IS_ENABLED(CONFIG_DRAM_ODT_EN))
			val = 0x0;
		else
			val = 0xaaaa;
		clrsetbits_le32(&mctl_phy->dx[i].gcr[2], 0xffff, val);

		if (IS_ENABLED(CONFIG_DRAM_ODT_EN))
			val = 0x0;
		else
			val = 0x2020;
		clrsetbits_le32(&mctl_phy->dx[i].gcr[3], 0x3030, val);
	}

	mctl_bit_delay_set(para);
	udelay(1);

	setbits_le32(&mctl_phy->pgcr[6], BIT(0));
	clrbits_le32(&mctl_phy->pgcr[6], 0xfff8);
	for (i = 0; i < 4; i++)
		clrbits_le32(&mctl_phy->dx[i].gcr[3], ~0x3ffff);
	udelay(10);

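	/*
	 * Bit 22 of PGSR0 presumably reports a DQS gate training error,
	 * which is what training produces when it probes a rank (or byte
	 * lane) that is not actually populated.
	 */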
	if (readl(&mctl_phy->pgsr[0]) & 0x400000) {
		/*
		 * Detect single rank.
		 * TODO: also detect half DQ.
		 */
		if ((readl(&mctl_phy->dx[0].rsr[0]) & 0x3) == 2 &&
		    (readl(&mctl_phy->dx[1].rsr[0]) & 0x3) == 2 &&
		    (readl(&mctl_phy->dx[2].rsr[0]) & 0x3) == 2 &&
		    (readl(&mctl_phy->dx[3].rsr[0]) & 0x3) == 2) {
			para->ranks = 1;
			/* Restart DRAM initialization from scratch. */
			mctl_core_init(para);
			return;
		} else {
			panic("This DRAM setup is currently not supported.\n");
		}
	}

	if (readl(&mctl_phy->pgsr[0]) & 0xff00000) {
		/* Oops! There's something wrong! */
		debug("PLL = %x\n", readl(0x3001010));
		debug("DRAM PHY PGSR0 = %x\n", readl(&mctl_phy->pgsr[0]));
		for (i = 0; i < 4; i++)
			debug("DRAM PHY DX%dRSR0 = %x\n", i, readl(&mctl_phy->dx[i].rsr[0]));
		panic("Error while initializing DRAM PHY!\n");
	}

	if (sunxi_dram_is_lpddr(para->type))
		clrsetbits_le32(&mctl_phy->dsgcr, 0xc0, 0x40);
	clrbits_le32(&mctl_phy->pgcr[1], 0x40);
	clrbits_le32(&mctl_ctl->dfimisc, BIT(0));
	writel(1, &mctl_ctl->swctl);
	mctl_await_completion(&mctl_ctl->swstat, 1, 1);
	clrbits_le32(&mctl_ctl->rfshctl3, BIT(0));

	setbits_le32(&mctl_com->unk_0x014, BIT(31));
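	/* Re-enable the masters masked out in mctl_sys_init() above. */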
	writel(0xffffffff, &mctl_com->maer0);
	writel(0x7ff, &mctl_com->maer1);
	writel(0xffff, &mctl_com->maer2);
}

static void mctl_auto_detect_dram_size(struct dram_para *para)
{
	/* TODO: non-LPDDR3, half DQ */
	/*
	 * The rank number is detected by the code in mctl_channel_init().
	 * Once DQ detection is available, it will be handled there as well.
	 */
	mctl_core_init(para);

	/* detect row address bits */
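	/*
	 * The probing below relies on mctl_mem_matches() (dram_helpers.c),
	 * which checks whether a write at the given offset aliases back to
	 * the start of DRAM; if it does, the tested address bit is not
	 * actually wired, so the smaller geometry is assumed.
	 */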
	para->cols = 8;
	para->rows = 18;
	mctl_core_init(para);

	for (para->rows = 13; para->rows < 18; para->rows++) {
		/* 8 banks, 8 bit per byte and 32 bit width */
		if (mctl_mem_matches((1 << (para->rows + para->cols + 5))))
			break;
	}

	/* detect column address bits */
	para->cols = 11;
	mctl_core_init(para);

	for (para->cols = 8; para->cols < 11; para->cols++) {
		/* 8 bits per byte and 32 bit width */
		if (mctl_mem_matches(1 << (para->cols + 2)))
			break;
	}
}

unsigned long mctl_calc_size(struct dram_para *para)
{
	/* TODO: non-LPDDR3, half DQ */

	/* 8 banks, 32-bit (4 byte) data width */
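	/*
	 * Example: cols = 10, rows = 15, ranks = 1 gives
	 * (1 << 28) * 4 bytes = 1 GiB.
	 */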
	return (1ULL << (para->cols + para->rows + 3)) * 4 * para->ranks;
}

#define SUN50I_H6_DX_WRITE_DELAYS				\
	{{  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  4,  4,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 }}
#define SUN50I_H6_DX_READ_DELAYS					\
	{{  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 },	\
	 {  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 },	\
	 {  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 },	\
	 {  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 }}

unsigned long sunxi_dram_init(void)
{
	struct sunxi_mctl_com_reg * const mctl_com =
		(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
	struct dram_para para = {
		.clk = CONFIG_DRAM_CLK,
		.ranks = 2,
		.cols = 11,
		.rows = 14,
#ifdef CONFIG_SUNXI_DRAM_H6_LPDDR3
		.type = SUNXI_DRAM_TYPE_LPDDR3,
		.dx_read_delays  = SUN50I_H6_DX_READ_DELAYS,
		.dx_write_delays = SUN50I_H6_DX_WRITE_DELAYS,
#elif defined(CONFIG_SUNXI_DRAM_H6_DDR3_1333)
		.type = SUNXI_DRAM_TYPE_DDR3,
		.dx_read_delays  = SUN50I_H6_DX_READ_DELAYS,
		.dx_write_delays = SUN50I_H6_DX_WRITE_DELAYS,
#endif
	};

	unsigned long size;

	/* RES_CAL_CTRL_REG in BSP U-Boot */
	setbits_le32(0x7010310, BIT(8));
	clrbits_le32(0x7010318, 0x3f);

	mctl_auto_detect_dram_size(&para);

	mctl_core_init(&para);

	size = mctl_calc_size(&para);

	clrsetbits_le32(&mctl_com->cr, 0xf0, (size >> (10 + 10 + 4)) & 0xf0);

	mctl_set_master_priority();

	return size;
}