// SPDX-License-Identifier: GPL-2.0+
/*
 * sun50i H6 platform dram controller init
 *
 * (C) Copyright 2017 Icenowy Zheng <icenowy@aosc.io>
 *
 */
#include <init.h>
#include <log.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/dram.h>
#include <asm/arch/cpu.h>
#include <asm/arch/prcm.h>
#include <linux/bitops.h>
#include <linux/delay.h>

/*
 * The DRAM controller structure on H6 is similar to the ones on A23/A80:
 * they all contain 3 parts: COM, CTL and PHY. (As a note, on A33/A83T/H3/
 * A64/H5/R40 the CTL and PHY are combined into one block.)
 *
 * COM is Allwinner-specific. On H6, the address mapping function is moved
 * from COM to CTL (using the standard ADDRMAP registers of the DesignWare
 * memory controller).
 *
 * CTL (controller) and PHY are from DesignWare.
 *
 * The CTL part is a bit similar to the one on A23/A80 (because they all
 * originate from DesignWare), but has more registers added.
 *
 * The PHY part is quite new and is not seen in any previous Allwinner SoC,
 * nor in any other SoC supported by U-Boot. The only other SoC known to
 * have a similar PHY is the ZynqMP.
 */

static void mctl_sys_init(struct dram_para *para);
static void mctl_com_init(struct dram_para *para);
static bool mctl_channel_init(struct dram_para *para);

static bool mctl_core_init(struct dram_para *para)
{
	mctl_sys_init(para);
	mctl_com_init(para);
	switch (para->type) {
	case SUNXI_DRAM_TYPE_LPDDR3:
	case SUNXI_DRAM_TYPE_DDR3:
		mctl_set_timing_params(para);
		break;
	default:
		panic("Unsupported DRAM type!");
	}
	return mctl_channel_init(para);
}

/* PHY initialisation */
static void mctl_phy_pir_init(u32 val)
{
	struct sunxi_mctl_phy_reg * const mctl_phy =
			(struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;

	writel(val, &mctl_phy->pir);
	writel(val | BIT(0), &mctl_phy->pir);	/* Start initialisation. */
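	/* PGSR0 bit 0 flags completion of all the steps selected in PIR. */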
	mctl_await_completion(&mctl_phy->pgsr[0], BIT(0), BIT(0));
}

enum {
	MBUS_PORT_CPU = 0,
	MBUS_PORT_GPU = 1,
	MBUS_PORT_MAHB = 2,
	MBUS_PORT_DMA = 3,
	MBUS_PORT_VE = 4,
	MBUS_PORT_CE = 5,
	MBUS_PORT_TSC0 = 6,
	MBUS_PORT_NDFC0 = 8,
	MBUS_PORT_CSI0 = 11,
	MBUS_PORT_DI0 = 14,
	MBUS_PORT_DI1 = 15,
	MBUS_PORT_DE300 = 16,
	MBUS_PORT_IOMMU = 25,
	MBUS_PORT_VE2 = 26,
	MBUS_PORT_USB3 = 37,
	MBUS_PORT_PCIE = 38,
	MBUS_PORT_VP9 = 39,
	MBUS_PORT_HDCP2 = 40,
};

enum {
	MBUS_QOS_LOWEST = 0,
	MBUS_QOS_LOW,
	MBUS_QOS_HIGH,
	MBUS_QOS_HIGHEST
};

static void mbus_configure_port(u8 port,
				bool bwlimit,
				bool priority,
				u8 qos,
				u8 waittime,
				u8 acs,
				u16 bwl0,
				u16 bwl1,
				u16 bwl2)
{
	struct sunxi_mctl_com_reg * const mctl_com =
			(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;

	const u32 cfg0 = ( (bwlimit ? (1 << 0) : 0)
			   | (priority ? (1 << 1) : 0)
			   | ((qos & 0x3) << 2)
			   | ((waittime & 0xf) << 4)
			   | ((acs & 0xff) << 8)
			   | (bwl0 << 16) );
	const u32 cfg1 = ((u32)bwl2 << 16) | (bwl1 & 0xffff);

	debug("MBUS port %d cfg0 %08x cfg1 %08x\n", port, cfg0, cfg1);
	writel(cfg0, &mctl_com->master[port].cfg0);
	writel(cfg1, &mctl_com->master[port].cfg1);
}

#define MBUS_CONF(port, bwlimit, qos, acs, bwl0, bwl1, bwl2)	\
	mbus_configure_port(MBUS_PORT_ ## port, bwlimit, false,	\
			    MBUS_QOS_ ## qos, 0, acs, bwl0, bwl1, bwl2)

static void mctl_set_master_priority(void)
{
	struct sunxi_mctl_com_reg * const mctl_com =
			(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;

	/* enable bandwidth limit windows and set window size to 1 us */
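	/*
	 * 399 presumably is the window length in MBUS cycles minus one;
	 * at a 400 MHz MBUS clock this would match the 1 us window above.
	 */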
	writel(399, &mctl_com->tmr);
	writel(BIT(16), &mctl_com->bwcr);

	MBUS_CONF(  CPU,  true, HIGHEST, 0,  256,  128,  100);
	MBUS_CONF(  GPU,  true,    HIGH, 0, 1536, 1400,  256);
	MBUS_CONF( MAHB,  true, HIGHEST, 0,  512,  256,   96);
	MBUS_CONF(  DMA,  true,    HIGH, 0,  256,  100,   80);
	MBUS_CONF(   VE,  true,    HIGH, 2, 8192, 5500, 5000);
	MBUS_CONF(   CE,  true,    HIGH, 2,  100,   64,   32);
	MBUS_CONF( TSC0,  true,    HIGH, 2,  100,   64,   32);
	MBUS_CONF(NDFC0,  true,    HIGH, 0,  256,  128,   64);
	MBUS_CONF( CSI0,  true,    HIGH, 0,  256,  128,  100);
	MBUS_CONF(  DI0,  true,    HIGH, 0, 1024,  256,   64);
	MBUS_CONF(DE300,  true, HIGHEST, 6, 8192, 2800, 2400);
	MBUS_CONF(IOMMU,  true, HIGHEST, 0,  100,   64,   32);
	MBUS_CONF(  VE2,  true,    HIGH, 2, 8192, 5500, 5000);
	MBUS_CONF( USB3,  true,    HIGH, 0,  256,  128,   64);
	MBUS_CONF( PCIE,  true,    HIGH, 2,  100,   64,   32);
	MBUS_CONF(  VP9,  true,    HIGH, 2, 8192, 5500, 5000);
	MBUS_CONF(HDCP2,  true,    HIGH, 2,  100,   64,   32);
}

static void mctl_sys_init(struct dram_para *para)
{
	struct sunxi_ccm_reg * const ccm =
			(struct sunxi_ccm_reg *)SUNXI_CCM_BASE;
	struct sunxi_mctl_com_reg * const mctl_com =
			(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
	struct sunxi_mctl_ctl_reg * const mctl_ctl =
			(struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;

	/* Put all DRAM-related blocks to reset state */
	clrbits_le32(&ccm->mbus_cfg, MBUS_ENABLE | MBUS_RESET);
	clrbits_le32(&ccm->dram_gate_reset, BIT(0));
	udelay(5);
	writel(0, &ccm->dram_gate_reset);
	clrbits_le32(&ccm->pll5_cfg, CCM_PLL5_CTRL_EN);
	clrbits_le32(&ccm->dram_clk_cfg, DRAM_MOD_RESET);

	udelay(5);

	/* Set PLL5 rate to doubled DRAM clock rate */
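	/*
	 * PLL5 runs off the 24 MHz oscillator, so N = clk * 2 / 24;
	 * e.g. CONFIG_DRAM_CLK = 744 gives N = 62 and a 1488 MHz PLL5.
	 */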
	writel(CCM_PLL5_CTRL_EN | CCM_PLL5_LOCK_EN |
	       CCM_PLL5_CTRL_N(para->clk * 2 / 24), &ccm->pll5_cfg);
	mctl_await_completion(&ccm->pll5_cfg, CCM_PLL5_LOCK, CCM_PLL5_LOCK);

	/* Configure DRAM mod clock */
	writel(DRAM_CLK_SRC_PLL5, &ccm->dram_clk_cfg);
	setbits_le32(&ccm->dram_clk_cfg, DRAM_CLK_UPDATE);
	writel(BIT(RESET_SHIFT), &ccm->dram_gate_reset);
	udelay(5);
	setbits_le32(&ccm->dram_gate_reset, BIT(0));

	/* Disable all channels */
	writel(0, &mctl_com->maer0);
	writel(0, &mctl_com->maer1);
	writel(0, &mctl_com->maer2);

	/* Configure MBUS and enable DRAM mod reset */
	setbits_le32(&ccm->mbus_cfg, MBUS_RESET);
	setbits_le32(&ccm->mbus_cfg, MBUS_ENABLE);
	setbits_le32(&ccm->dram_clk_cfg, DRAM_MOD_RESET);
	udelay(5);

	/* Unknown hack from the BSP, which enables access to the mctl_ctl regs */
	writel(0x8000, &mctl_ctl->unk_0x00c);
}

static void mctl_set_addrmap(struct dram_para *para)
{
	struct sunxi_mctl_ctl_reg * const mctl_ctl =
			(struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;
	u8 cols = para->cols;
	u8 rows = para->rows;
	u8 ranks = para->ranks;

	if (!para->bus_full_width)
		cols -= 1;

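	/*
	 * DesignWare uMCTL2 ADDRMAP convention: each field holds an offset,
	 * added to an internal base, that selects the system address bit
	 * used for the given DRAM address bit; 0x1F (0x0F for the row
	 * fields) marks that bit as unused.
	 */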
	/* Ranks */
	if (ranks == 2)
		mctl_ctl->addrmap[0] = rows + cols - 3;
	else
		mctl_ctl->addrmap[0] = 0x1F;

	/* Banks, hardcoded to 8 banks now */
	mctl_ctl->addrmap[1] = (cols - 2) | (cols - 2) << 8 | (cols - 2) << 16;

	/* Columns */
	mctl_ctl->addrmap[2] = 0;
	switch (cols) {
	case 7:
		mctl_ctl->addrmap[3] = 0x1F1F1F00;
		mctl_ctl->addrmap[4] = 0x1F1F;
		break;
	case 8:
		mctl_ctl->addrmap[3] = 0x1F1F0000;
		mctl_ctl->addrmap[4] = 0x1F1F;
		break;
	case 9:
		mctl_ctl->addrmap[3] = 0x1F000000;
		mctl_ctl->addrmap[4] = 0x1F1F;
		break;
	case 10:
		mctl_ctl->addrmap[3] = 0;
		mctl_ctl->addrmap[4] = 0x1F1F;
		break;
	case 11:
		mctl_ctl->addrmap[3] = 0;
		mctl_ctl->addrmap[4] = 0x1F00;
		break;
	case 12:
		mctl_ctl->addrmap[3] = 0;
		mctl_ctl->addrmap[4] = 0;
		break;
	default:
		panic("Unsupported DRAM configuration: column number invalid\n");
	}

	/* Rows */
	mctl_ctl->addrmap[5] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | ((cols - 3) << 24);
	switch (rows) {
	case 13:
		mctl_ctl->addrmap[6] = (cols - 3) | 0x0F0F0F00;
		mctl_ctl->addrmap[7] = 0x0F0F;
		break;
	case 14:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | 0x0F0F0000;
		mctl_ctl->addrmap[7] = 0x0F0F;
		break;
	case 15:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | 0x0F000000;
		mctl_ctl->addrmap[7] = 0x0F0F;
		break;
	case 16:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | ((cols - 3) << 24);
		mctl_ctl->addrmap[7] = 0x0F0F;
		break;
	case 17:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | ((cols - 3) << 24);
		mctl_ctl->addrmap[7] = (cols - 3) | 0x0F00;
		break;
	case 18:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | ((cols - 3) << 24);
		mctl_ctl->addrmap[7] = (cols - 3) | ((cols - 3) << 8);
		break;
	default:
		panic("Unsupported DRAM configuration: row number invalid\n");
	}

	/* Bank groups, DDR4 only */
	mctl_ctl->addrmap[8] = 0x3F3F;
}

static void mctl_com_init(struct dram_para *para)
{
	struct sunxi_mctl_com_reg * const mctl_com =
			(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
	struct sunxi_mctl_ctl_reg * const mctl_ctl =
			(struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;
	struct sunxi_mctl_phy_reg * const mctl_phy =
			(struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;
	u32 reg_val, tmp;

	mctl_set_addrmap(para);

	setbits_le32(&mctl_com->cr, BIT(31));

	/* The bonding ID seems to be always 7. */
	if (readl(SUNXI_SIDC_BASE + 0x100) == 7)	/* bonding ID */
		clrbits_le32(&mctl_com->cr, BIT(27));
	else if (readl(SUNXI_SIDC_BASE + 0x100) == 3)
		setbits_le32(&mctl_com->cr, BIT(27));

	if (para->clk > 408)
		reg_val = 0xf00;
	else if (para->clk > 246)
		reg_val = 0x1f00;
	else
		reg_val = 0x3f00;
	clrsetbits_le32(&mctl_com->unk_0x008, 0x3f00, reg_val);

	/* TODO: DDR4 */
	reg_val = MSTR_BURST_LENGTH(8) | MSTR_ACTIVE_RANKS(para->ranks);
	if (para->type == SUNXI_DRAM_TYPE_LPDDR3)
		reg_val |= MSTR_DEVICETYPE_LPDDR3;
	if (para->type == SUNXI_DRAM_TYPE_DDR3)
		reg_val |= MSTR_DEVICETYPE_DDR3 | MSTR_2TMODE;
	if (para->bus_full_width)
		reg_val |= MSTR_BUSWIDTH_FULL;
	else
		reg_val |= MSTR_BUSWIDTH_HALF;
	writel(reg_val | BIT(31), &mctl_ctl->mstr);

	if (para->type == SUNXI_DRAM_TYPE_LPDDR3)
		reg_val = DCR_LPDDR3 | DCR_DDR8BANK;
	if (para->type == SUNXI_DRAM_TYPE_DDR3)
		reg_val = DCR_DDR3 | DCR_DDR8BANK | DCR_DDR2T;
	writel(reg_val | 0x400, &mctl_phy->dcr);

	if (para->ranks == 2)
		writel(0x0303, &mctl_ctl->odtmap);
	else
		writel(0x0201, &mctl_ctl->odtmap);

	/* TODO: DDR4 */
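	/*
	 * For LPDDR3 the ODT timing is derived from the clock: tmp below is
	 * 3.5 ns (clk * 7 / 2000) expressed in DRAM clock cycles, which
	 * presumably feeds the write ODT hold/delay fields of ODTCFG.
	 */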
	if (para->type == SUNXI_DRAM_TYPE_LPDDR3) {
		tmp = para->clk * 7 / 2000;
		reg_val = 0x0400;
		reg_val |= (tmp + 7) << 24;
		reg_val |= (((para->clk < 400) ? 3 : 4) - tmp) << 16;
	} else if (para->type == SUNXI_DRAM_TYPE_DDR3) {
		reg_val = 0x06000400; /* TODO?: Use CL - CWL value in [7:0] */
	} else {
		panic("Only (LP)DDR3 supported (type = %d)\n", para->type);
	}
	writel(reg_val, &mctl_ctl->odtcfg);

	if (!para->bus_full_width) {
		writel(0x0, &mctl_phy->dx[2].gcr[0]);
		writel(0x0, &mctl_phy->dx[3].gcr[0]);
	}
}

static void mctl_bit_delay_set(struct dram_para *para)
{
	struct sunxi_mctl_phy_reg * const mctl_phy =
			(struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;
	int i, j;
	u32 val;

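	/*
	 * The BDLR registers hold per-bit delay-line settings, one byte per
	 * lane bit; the board specific read/write delays from dram_para are
	 * added on top of the reset values. PGCR0 bit 26 appears to control
	 * when the read-path delay updates take effect.
	 */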
	for (i = 0; i < 4; i++) {
		val = readl(&mctl_phy->dx[i].bdlr0);
		for (j = 0; j < 4; j++)
			val += para->dx_write_delays[i][j] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr0);

		val = readl(&mctl_phy->dx[i].bdlr1);
		for (j = 0; j < 4; j++)
			val += para->dx_write_delays[i][j + 4] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr1);

		val = readl(&mctl_phy->dx[i].bdlr2);
		for (j = 0; j < 4; j++)
			val += para->dx_write_delays[i][j + 8] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr2);
	}
	clrbits_le32(&mctl_phy->pgcr[0], BIT(26));

	for (i = 0; i < 4; i++) {
		val = readl(&mctl_phy->dx[i].bdlr3);
		for (j = 0; j < 4; j++)
			val += para->dx_read_delays[i][j] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr3);

		val = readl(&mctl_phy->dx[i].bdlr4);
		for (j = 0; j < 4; j++)
			val += para->dx_read_delays[i][j + 4] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr4);

		val = readl(&mctl_phy->dx[i].bdlr5);
		for (j = 0; j < 4; j++)
			val += para->dx_read_delays[i][j + 8] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr5);

		val = readl(&mctl_phy->dx[i].bdlr6);
		val += (para->dx_read_delays[i][12] << 8) |
		       (para->dx_read_delays[i][13] << 16);
		writel(val, &mctl_phy->dx[i].bdlr6);
	}
	setbits_le32(&mctl_phy->pgcr[0], BIT(26));
	udelay(1);

	if (para->type != SUNXI_DRAM_TYPE_LPDDR3)
		return;

	for (i = 1; i < 14; i++) {
		val = readl(&mctl_phy->acbdlr[i]);
		val += 0x0a0a0a0a;
		writel(val, &mctl_phy->acbdlr[i]);
	}
}

static bool mctl_channel_init(struct dram_para *para)
{
	struct sunxi_mctl_com_reg * const mctl_com =
			(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
	struct sunxi_mctl_ctl_reg * const mctl_ctl =
			(struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;
	struct sunxi_mctl_phy_reg * const mctl_phy =
			(struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;
	int i;
	u32 val;

	setbits_le32(&mctl_ctl->dfiupd[0], BIT(31) | BIT(30));
	setbits_le32(&mctl_ctl->zqctl[0], BIT(31) | BIT(30));
	writel(0x2f05, &mctl_ctl->sched[0]);
	setbits_le32(&mctl_ctl->rfshctl3, BIT(0));
	setbits_le32(&mctl_ctl->dfimisc, BIT(0));
	setbits_le32(&mctl_ctl->unk_0x00c, BIT(8));
	clrsetbits_le32(&mctl_phy->pgcr[1], 0x180, 0xc0);
	/* TODO: non-LPDDR3 types */
	clrsetbits_le32(&mctl_phy->pgcr[2], GENMASK(17, 0), ns_to_t(7800));
	clrbits_le32(&mctl_phy->pgcr[6], BIT(0));
	clrsetbits_le32(&mctl_phy->dxccr, 0xee0, 0x220);
	/* TODO: VT compensation */
	clrsetbits_le32(&mctl_phy->dsgcr, BIT(0), 0x440060);
	clrbits_le32(&mctl_phy->vtcr[1], BIT(1));

	for (i = 0; i < 4; i++)
		clrsetbits_le32(&mctl_phy->dx[i].gcr[0], 0xe00, 0x800);
	for (i = 0; i < 4; i++)
		clrsetbits_le32(&mctl_phy->dx[i].gcr[2], 0xffff, 0x5555);
	for (i = 0; i < 4; i++)
		clrsetbits_le32(&mctl_phy->dx[i].gcr[3], 0x3030, 0x1010);

	udelay(100);

	if (para->ranks == 2)
		setbits_le32(&mctl_phy->dtcr[1], 0x30000);
	else
		clrsetbits_le32(&mctl_phy->dtcr[1], 0x30000, 0x10000);

	if (sunxi_dram_is_lpddr(para->type))
		clrbits_le32(&mctl_phy->dtcr[1], BIT(1));
	if (para->ranks == 2) {
		writel(0x00010001, &mctl_phy->rankidr);
		writel(0x20000, &mctl_phy->odtcr);
	} else {
		writel(0x0, &mctl_phy->rankidr);
		writel(0x10000, &mctl_phy->odtcr);
	}

	/* set bits [3:0] to 1? 0 not valid in ZynqMP d/s */
	if (para->type == SUNXI_DRAM_TYPE_LPDDR3)
		clrsetbits_le32(&mctl_phy->dtcr[0], 0xF0000000, 0x10000040);
	else
		clrsetbits_le32(&mctl_phy->dtcr[0], 0xF0000000, 0x10000000);
	if (para->clk <= 792) {
		if (para->clk <= 672) {
			if (para->clk <= 600)
				val = 0x300;
			else
				val = 0x400;
		} else {
			val = 0x500;
		}
	} else {
		val = 0x600;
	}
	/* FIXME: NOT REVIEWED YET */
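	/*
	 * The bytes of CONFIG_DRAM_ZQ appear to be distributed over the two
	 * ZQ segments below: the low bytes program segment 0, the higher
	 * bytes segment 1.
	 */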
	clrsetbits_le32(&mctl_phy->zq[0].zqcr, 0x700, val);
	clrsetbits_le32(&mctl_phy->zq[0].zqpr[0], 0xff,
			CONFIG_DRAM_ZQ & 0xff);
	clrbits_le32(&mctl_phy->zq[0].zqor[0], 0xfffff);
	setbits_le32(&mctl_phy->zq[0].zqor[0], (CONFIG_DRAM_ZQ >> 8) & 0xff);
	setbits_le32(&mctl_phy->zq[0].zqor[0], (CONFIG_DRAM_ZQ & 0xf00) - 0x100);
	setbits_le32(&mctl_phy->zq[0].zqor[0], (CONFIG_DRAM_ZQ & 0xff00) << 4);
	clrbits_le32(&mctl_phy->zq[1].zqpr[0], 0xfffff);
	setbits_le32(&mctl_phy->zq[1].zqpr[0], (CONFIG_DRAM_ZQ >> 16) & 0xff);
	setbits_le32(&mctl_phy->zq[1].zqpr[0], ((CONFIG_DRAM_ZQ >> 8) & 0xf00) - 0x100);
	setbits_le32(&mctl_phy->zq[1].zqpr[0], (CONFIG_DRAM_ZQ & 0xff0000) >> 4);
	if (para->type == SUNXI_DRAM_TYPE_LPDDR3) {
		for (i = 1; i < 14; i++)
			writel(0x06060606, &mctl_phy->acbdlr[i]);
	}

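	/*
	 * Trigger the full PHY init sequence: impedance and delay-line
	 * calibration, PHY reset, DRAM init, DQS gate training and
	 * read/write deskew and eye training. DDR3 additionally needs a
	 * DRAM reset and write leveling.
	 */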
	val = PIR_ZCAL | PIR_DCAL | PIR_PHYRST | PIR_DRAMINIT | PIR_QSGATE |
	      PIR_RDDSKW | PIR_WRDSKW | PIR_RDEYE | PIR_WREYE;
	if (para->type == SUNXI_DRAM_TYPE_DDR3)
		val |= PIR_DRAMRST | PIR_WL;
	mctl_phy_pir_init(val);

	/* TODO: DDR4 types ? */
	for (i = 0; i < 4; i++)
		writel(0x00000909, &mctl_phy->dx[i].gcr[5]);

	for (i = 0; i < 4; i++) {
		if (IS_ENABLED(CONFIG_DRAM_ODT_EN))
			val = 0x0;
		else
			val = 0xaaaa;
		clrsetbits_le32(&mctl_phy->dx[i].gcr[2], 0xffff, val);

		if (IS_ENABLED(CONFIG_DRAM_ODT_EN))
			val = 0x0;
		else
			val = 0x2020;
		clrsetbits_le32(&mctl_phy->dx[i].gcr[3], 0x3030, val);
	}

	mctl_bit_delay_set(para);
	udelay(1);

	setbits_le32(&mctl_phy->pgcr[6], BIT(0));
	clrbits_le32(&mctl_phy->pgcr[6], 0xfff8);
	for (i = 0; i < 4; i++)
		clrbits_le32(&mctl_phy->dx[i].gcr[3], ~0x3ffff);
	udelay(10);

	if (readl(&mctl_phy->pgsr[0]) & 0xff00000) {
		/* Oops! There's something wrong! */
		debug("PLL = %x\n", readl(0x3001010));
		debug("DRAM PHY PGSR0 = %x\n", readl(&mctl_phy->pgsr[0]));
		for (i = 0; i < 4; i++)
			debug("DRAM PHY DX%dRSR0 = %x\n", i, readl(&mctl_phy->dx[i].rsr[0]));
		debug("Error while initializing DRAM PHY!\n");

		return false;
	}

	if (sunxi_dram_is_lpddr(para->type))
		clrsetbits_le32(&mctl_phy->dsgcr, 0xc0, 0x40);
	clrbits_le32(&mctl_phy->pgcr[1], 0x40);
	clrbits_le32(&mctl_ctl->dfimisc, BIT(0));
	writel(1, &mctl_ctl->swctl);
	mctl_await_completion(&mctl_ctl->swstat, 1, 1);
	clrbits_le32(&mctl_ctl->rfshctl3, BIT(0));

	setbits_le32(&mctl_com->unk_0x014, BIT(31));
	writel(0xffffffff, &mctl_com->maer0);
	writel(0x7ff, &mctl_com->maer1);
	writel(0xffff, &mctl_com->maer2);

	return true;
}

static void mctl_auto_detect_rank_width(struct dram_para *para)
{
	/* this is the minimum configuration that is supported */
	para->cols = 8;
	para->rows = 13;

	/*
	 * Previous versions of this driver tried to auto detect the rank
	 * and width by looking at controller registers. However this proved
	 * to be not reliable, so this approach here is the more robust
	 * solution. Check the git history for details.
	 *
	 * The strategy here is to test the most demanding combination first
	 * and the least demanding last; otherwise the HW might not be fully
	 * utilized. For example, the half bus width and rank = 1 combination
	 * would also work on HW with full bus width and rank = 2, but only
	 * 1/4 of the RAM would be visible.
	 */

	debug("testing 32-bit width, rank = 2\n");
	para->bus_full_width = 1;
	para->ranks = 2;
	if (mctl_core_init(para))
		return;

	debug("testing 32-bit width, rank = 1\n");
	para->bus_full_width = 1;
	para->ranks = 1;
	if (mctl_core_init(para))
		return;

	debug("testing 16-bit width, rank = 2\n");
	para->bus_full_width = 0;
	para->ranks = 2;
	if (mctl_core_init(para))
		return;

	debug("testing 16-bit width, rank = 1\n");
	para->bus_full_width = 0;
	para->ranks = 1;
	if (mctl_core_init(para))
		return;

	panic("This DRAM setup is currently not supported.\n");
}

static void mctl_auto_detect_dram_size(struct dram_para *para)
{
	/* TODO: non-(LP)DDR3 */

	/* detect row address bits */
	para->cols = 8;
	para->rows = 18;
	mctl_core_init(para);

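	/*
	 * With the maximum row count programmed, mctl_mem_matches() checks
	 * whether the data at the given offset mirrors offset 0; if it does,
	 * that address bit is not wired and the real row count has been
	 * found.
	 */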
	for (para->rows = 13; para->rows < 18; para->rows++) {
		/* 8 banks, 8 bits per byte and 16/32-bit width */
		if (mctl_mem_matches((1 << (para->rows + para->cols +
					    4 + para->bus_full_width))))
			break;
	}

	/* detect column address bits */
	para->cols = 11;
	mctl_core_init(para);

	for (para->cols = 8; para->cols < 11; para->cols++) {
		/* 8 bits per byte and 16/32-bit width */
		if (mctl_mem_matches(1 << (para->cols + 1 +
					   para->bus_full_width)))
			break;
	}
}

unsigned long mctl_calc_size(struct dram_para *para)
{
	u8 width = para->bus_full_width ? 4 : 2;

	/* TODO: non-(LP)DDR3 */

	/* 8 banks */
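	/*
	 * Size = 2^(cols + rows + 3 bank bits) * bus width in bytes * ranks;
	 * e.g. cols = 10, rows = 15, full width (4 bytes) and 2 ranks give
	 * (1 << 28) * 4 * 2 = 2 GiB.
	 */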
	return (1ULL << (para->cols + para->rows + 3)) * width * para->ranks;
}

#define SUN50I_H6_LPDDR3_DX_WRITE_DELAYS			\
	{{  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  4,  4,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 }}
#define SUN50I_H6_LPDDR3_DX_READ_DELAYS				\
	{{  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 },	\
	 {  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 },	\
	 {  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 },	\
	 {  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 }}

#define SUN50I_H6_DDR3_DX_WRITE_DELAYS				\
	{{  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 }}
#define SUN50I_H6_DDR3_DX_READ_DELAYS				\
	{{  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 }}

unsigned long sunxi_dram_init(void)
{
	struct sunxi_mctl_com_reg * const mctl_com =
			(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
	struct sunxi_prcm_reg *const prcm =
		(struct sunxi_prcm_reg *)SUNXI_PRCM_BASE;
	struct dram_para para = {
		.clk = CONFIG_DRAM_CLK,
#ifdef CONFIG_SUNXI_DRAM_H6_LPDDR3
		.type = SUNXI_DRAM_TYPE_LPDDR3,
		.dx_read_delays  = SUN50I_H6_LPDDR3_DX_READ_DELAYS,
		.dx_write_delays = SUN50I_H6_LPDDR3_DX_WRITE_DELAYS,
#elif defined(CONFIG_SUNXI_DRAM_H6_DDR3_1333)
		.type = SUNXI_DRAM_TYPE_DDR3,
		.dx_read_delays  = SUN50I_H6_DDR3_DX_READ_DELAYS,
		.dx_write_delays = SUN50I_H6_DDR3_DX_WRITE_DELAYS,
#endif
	};

	unsigned long size;

	setbits_le32(&prcm->res_cal_ctrl, BIT(8));
	clrbits_le32(&prcm->ohms240, 0x3f);

	mctl_auto_detect_rank_width(&para);
	mctl_auto_detect_dram_size(&para);

	mctl_core_init(&para);

	size = mctl_calc_size(&para);

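	/*
	 * Bits [7:4] of mctl_com->cr apparently encode the DRAM size in
	 * 256 MiB units (e.g. 8 for 2 GiB).
	 */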
	clrsetbits_le32(&mctl_com->cr, 0xf0, (size >> (10 + 10 + 4)) & 0xf0);

	mctl_set_master_priority();

	return size;
}