// SPDX-License-Identifier: GPL-2.0+
/*
 * sun50i H6 platform dram controller init
 *
 * (C) Copyright 2017 Icenowy Zheng <icenowy@aosc.io>
 *
 */
#include <common.h>
#include <init.h>
#include <log.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/dram.h>
#include <asm/arch/cpu.h>
#include <asm/arch/prcm.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kconfig.h>

/*
 * The DRAM controller structure on H6 is similar to the ones on A23/A80:
 * they all contain 3 parts, COM, CTL and PHY. (As a note, on A33/A83T/H3/
 * A64/H5/R40 the CTL and PHY parts are merged.)
 *
 * COM is Allwinner-specific. On H6, the address mapping function is moved
 * from COM to CTL (with the standard ADDRMAP registers of the DesignWare
 * memory controller).
 *
 * CTL (controller) and PHY are from DesignWare.
 *
 * The CTL part is a bit similar to the one on A23/A80 (because they all
 * originate from DesignWare), but has more registers added.
 *
 * The PHY part is quite new, not seen in any previous Allwinner SoCs, and
 * not seen on other SoCs in U-Boot. The only SoC that is also known to have
 * a similar PHY is ZynqMP.
 */

static void mctl_sys_init(struct dram_para *para);
static void mctl_com_init(struct dram_para *para);
static bool mctl_channel_init(struct dram_para *para);

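/*
 * Run one full controller bring-up with the given parameters: clock/reset
 * setup, COM/CTL configuration, timing parameters and finally PHY training.
 * Returns true if the PHY training in mctl_channel_init() succeeded.
 */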
static bool mctl_core_init(struct dram_para *para)
{
	mctl_sys_init(para);
	mctl_com_init(para);
	switch (para->type) {
	case SUNXI_DRAM_TYPE_LPDDR3:
	case SUNXI_DRAM_TYPE_DDR3:
		mctl_set_timing_params(para);
		break;
	default:
		panic("Unsupported DRAM type!");
	}
	return mctl_channel_init(para);
}

/* PHY initialisation */
static void mctl_phy_pir_init(u32 val)
{
	struct sunxi_mctl_phy_reg * const mctl_phy =
		(struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;

	writel(val, &mctl_phy->pir);
	writel(val | BIT(0), &mctl_phy->pir);	/* Start initialisation. */
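	/* Wait for the PHY to report init done (PGSR0 bit 0). */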
	mctl_await_completion(&mctl_phy->pgsr[0], BIT(0), BIT(0));
}

enum {
	MBUS_PORT_CPU		= 0,
	MBUS_PORT_GPU		= 1,
	MBUS_PORT_MAHB		= 2,
	MBUS_PORT_DMA		= 3,
	MBUS_PORT_VE		= 4,
	MBUS_PORT_CE		= 5,
	MBUS_PORT_TSC0		= 6,
	MBUS_PORT_NDFC0		= 8,
	MBUS_PORT_CSI0		= 11,
	MBUS_PORT_DI0		= 14,
	MBUS_PORT_DI1		= 15,
	MBUS_PORT_DE300		= 16,
	MBUS_PORT_IOMMU		= 25,
	MBUS_PORT_VE2		= 26,
	MBUS_PORT_USB3		= 37,
	MBUS_PORT_PCIE		= 38,
	MBUS_PORT_VP9		= 39,
	MBUS_PORT_HDCP2		= 40,
};

enum {
	MBUS_QOS_LOWEST = 0,
	MBUS_QOS_LOW,
	MBUS_QOS_HIGH,
	MBUS_QOS_HIGHEST
};
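
/*
 * cfg0 packs the bandwidth-limit enable, priority, QoS, wait time, ACS and
 * the first bandwidth-limit value; cfg1 holds the remaining two limits.
 */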
inline void mbus_configure_port(u8 port,
				bool bwlimit,
				bool priority,
				u8 qos,
				u8 waittime,
				u8 acs,
				u16 bwl0,
				u16 bwl1,
				u16 bwl2)
{
	struct sunxi_mctl_com_reg * const mctl_com =
			(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;

	const u32 cfg0 = ( (bwlimit ? (1 << 0) : 0)
			   | (priority ? (1 << 1) : 0)
			   | ((qos & 0x3) << 2)
			   | ((waittime & 0xf) << 4)
			   | ((acs & 0xff) << 8)
			   | (bwl0 << 16) );
	const u32 cfg1 = ((u32)bwl2 << 16) | (bwl1 & 0xffff);

	debug("MBUS port %d cfg0 %08x cfg1 %08x\n", port, cfg0, cfg1);
	writel(cfg0, &mctl_com->master[port].cfg0);
	writel(cfg1, &mctl_com->master[port].cfg1);
}

#define MBUS_CONF(port, bwlimit, qos, acs, bwl0, bwl1, bwl2)	\
	mbus_configure_port(MBUS_PORT_ ## port, bwlimit, false,	\
			    MBUS_QOS_ ## qos, 0, acs, bwl0, bwl1, bwl2)

static void mctl_set_master_priority(void)
{
	struct sunxi_mctl_com_reg * const mctl_com =
			(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;

	/* enable the bandwidth limit windows and set the window size to 1 us */
	writel(399, &mctl_com->tmr);
	writel(BIT(16), &mctl_com->bwcr);

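	/* port, bandwidth limit enable, QoS, ACS, then the three bandwidth-limit values */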
	MBUS_CONF(  CPU, true, HIGHEST, 0,  256,  128,  100);
	MBUS_CONF(  GPU, true,    HIGH, 0, 1536, 1400,  256);
	MBUS_CONF( MAHB, true, HIGHEST, 0,  512,  256,   96);
	MBUS_CONF(  DMA, true,    HIGH, 0,  256,  100,   80);
	MBUS_CONF(   VE, true,    HIGH, 2, 8192, 5500, 5000);
	MBUS_CONF(   CE, true,    HIGH, 2,  100,   64,   32);
	MBUS_CONF( TSC0, true,    HIGH, 2,  100,   64,   32);
	MBUS_CONF(NDFC0, true,    HIGH, 0,  256,  128,   64);
	MBUS_CONF( CSI0, true,    HIGH, 0,  256,  128,  100);
	MBUS_CONF(  DI0, true,    HIGH, 0, 1024,  256,   64);
	MBUS_CONF(DE300, true, HIGHEST, 6, 8192, 2800, 2400);
	MBUS_CONF(IOMMU, true, HIGHEST, 0,  100,   64,   32);
	MBUS_CONF(  VE2, true,    HIGH, 2, 8192, 5500, 5000);
	MBUS_CONF( USB3, true,    HIGH, 0,  256,  128,   64);
	MBUS_CONF( PCIE, true,    HIGH, 2,  100,   64,   32);
	MBUS_CONF(  VP9, true,    HIGH, 2, 8192, 5500, 5000);
	MBUS_CONF(HDCP2, true,    HIGH, 2,  100,   64,   32);
}

static void mctl_sys_init(struct dram_para *para)
{
	struct sunxi_ccm_reg * const ccm =
			(struct sunxi_ccm_reg *)SUNXI_CCM_BASE;
	struct sunxi_mctl_com_reg * const mctl_com =
			(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
	struct sunxi_mctl_ctl_reg * const mctl_ctl =
			(struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;

	/* Put all DRAM-related blocks into their reset state */
	clrbits_le32(&ccm->mbus_cfg, MBUS_ENABLE | MBUS_RESET);
	clrbits_le32(&ccm->dram_gate_reset, BIT(0));
	udelay(5);
	writel(0, &ccm->dram_gate_reset);
	clrbits_le32(&ccm->pll5_cfg, CCM_PLL5_CTRL_EN);
	clrbits_le32(&ccm->dram_clk_cfg, DRAM_MOD_RESET);

	udelay(5);

	/* Set the PLL5 rate to double the DRAM clock rate */
	writel(CCM_PLL5_CTRL_EN | CCM_PLL5_LOCK_EN |
	       CCM_PLL5_CTRL_N(para->clk * 2 / 24), &ccm->pll5_cfg);
	mctl_await_completion(&ccm->pll5_cfg, CCM_PLL5_LOCK, CCM_PLL5_LOCK);

	/* Configure the DRAM mod clock */
	writel(DRAM_CLK_SRC_PLL5, &ccm->dram_clk_cfg);
	setbits_le32(&ccm->dram_clk_cfg, DRAM_CLK_UPDATE);
	writel(BIT(RESET_SHIFT), &ccm->dram_gate_reset);
	udelay(5);
	setbits_le32(&ccm->dram_gate_reset, BIT(0));

	/* Disable all channels */
	writel(0, &mctl_com->maer0);
	writel(0, &mctl_com->maer1);
	writel(0, &mctl_com->maer2);

	/* Configure MBUS and take the DRAM mod out of reset */
	setbits_le32(&ccm->mbus_cfg, MBUS_RESET);
	setbits_le32(&ccm->mbus_cfg, MBUS_ENABLE);
	setbits_le32(&ccm->dram_clk_cfg, DRAM_MOD_RESET);
	udelay(5);

	/* Unknown hack from the BSP, which enables access to the mctl_ctl regs */
	writel(0x8000, &mctl_ctl->unk_0x00c);
}

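/*
 * Program the DesignWare ADDRMAP registers for the detected geometry.
 * Each byte selects the internal address bit used for a rank/bank/column/row
 * bit; the values 0x1F (ranks/columns) and 0x0F (rows) apparently mark bits
 * that are unused in the current configuration.
 */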
static void mctl_set_addrmap(struct dram_para *para)
{
	struct sunxi_mctl_ctl_reg * const mctl_ctl =
			(struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;
	u8 cols = para->cols;
	u8 rows = para->rows;
	u8 ranks = para->ranks;

	if (!para->bus_full_width)
		cols -= 1;

	/* Ranks */
	if (ranks == 2)
		mctl_ctl->addrmap[0] = rows + cols - 3;
	else
		mctl_ctl->addrmap[0] = 0x1F;

	/* Banks, hardcoded to 8 banks now */
	mctl_ctl->addrmap[1] = (cols - 2) | (cols - 2) << 8 | (cols - 2) << 16;

	/* Columns */
	mctl_ctl->addrmap[2] = 0;
	switch (cols) {
	case 7:
		mctl_ctl->addrmap[3] = 0x1F1F1F00;
		mctl_ctl->addrmap[4] = 0x1F1F;
		break;
	case 8:
		mctl_ctl->addrmap[3] = 0x1F1F0000;
		mctl_ctl->addrmap[4] = 0x1F1F;
		break;
	case 9:
		mctl_ctl->addrmap[3] = 0x1F000000;
		mctl_ctl->addrmap[4] = 0x1F1F;
		break;
	case 10:
		mctl_ctl->addrmap[3] = 0;
		mctl_ctl->addrmap[4] = 0x1F1F;
		break;
	case 11:
		mctl_ctl->addrmap[3] = 0;
		mctl_ctl->addrmap[4] = 0x1F00;
		break;
	case 12:
		mctl_ctl->addrmap[3] = 0;
		mctl_ctl->addrmap[4] = 0;
		break;
	default:
		panic("Unsupported DRAM configuration: column number invalid\n");
	}

	/* Rows */
	mctl_ctl->addrmap[5] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | ((cols - 3) << 24);
	switch (rows) {
	case 13:
		mctl_ctl->addrmap[6] = (cols - 3) | 0x0F0F0F00;
		mctl_ctl->addrmap[7] = 0x0F0F;
		break;
	case 14:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | 0x0F0F0000;
		mctl_ctl->addrmap[7] = 0x0F0F;
		break;
	case 15:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | 0x0F000000;
		mctl_ctl->addrmap[7] = 0x0F0F;
		break;
	case 16:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | ((cols - 3) << 24);
		mctl_ctl->addrmap[7] = 0x0F0F;
		break;
	case 17:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | ((cols - 3) << 24);
		mctl_ctl->addrmap[7] = (cols - 3) | 0x0F00;
		break;
	case 18:
		mctl_ctl->addrmap[6] = (cols - 3) | ((cols - 3) << 8) | ((cols - 3) << 16) | ((cols - 3) << 24);
		mctl_ctl->addrmap[7] = (cols - 3) | ((cols - 3) << 8);
		break;
	default:
		panic("Unsupported DRAM configuration: row number invalid\n");
	}

	/* Bank groups, DDR4 only */
	mctl_ctl->addrmap[8] = 0x3F3F;
}

static void mctl_com_init(struct dram_para *para)
{
	struct sunxi_mctl_com_reg * const mctl_com =
			(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
	struct sunxi_mctl_ctl_reg * const mctl_ctl =
			(struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;
	struct sunxi_mctl_phy_reg * const mctl_phy =
			(struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;
	u32 reg_val, tmp;

	mctl_set_addrmap(para);

	setbits_le32(&mctl_com->cr, BIT(31));

	/* The bonding ID seems to be always 7. */
	if (readl(SUNXI_SIDC_BASE + 0x100) == 7)	/* bonding ID */
		clrbits_le32(&mctl_com->cr, BIT(27));
	else if (readl(SUNXI_SIDC_BASE + 0x100) == 3)
		setbits_le32(&mctl_com->cr, BIT(27));

	if (para->clk > 408)
		reg_val = 0xf00;
	else if (para->clk > 246)
		reg_val = 0x1f00;
	else
		reg_val = 0x3f00;
	clrsetbits_le32(&mctl_com->unk_0x008, 0x3f00, reg_val);

	/* TODO: DDR4 */
	reg_val = MSTR_BURST_LENGTH(8) | MSTR_ACTIVE_RANKS(para->ranks);
	if (para->type == SUNXI_DRAM_TYPE_LPDDR3)
		reg_val |= MSTR_DEVICETYPE_LPDDR3;
	if (para->type == SUNXI_DRAM_TYPE_DDR3)
		reg_val |= MSTR_DEVICETYPE_DDR3 | MSTR_2TMODE;
	if (para->bus_full_width)
		reg_val |= MSTR_BUSWIDTH_FULL;
	else
		reg_val |= MSTR_BUSWIDTH_HALF;
	writel(reg_val | BIT(31), &mctl_ctl->mstr);

	if (para->type == SUNXI_DRAM_TYPE_LPDDR3)
		reg_val = DCR_LPDDR3 | DCR_DDR8BANK;
	if (para->type == SUNXI_DRAM_TYPE_DDR3)
		reg_val = DCR_DDR3 | DCR_DDR8BANK | DCR_DDR2T;
	writel(reg_val | 0x400, &mctl_phy->dcr);

	if (para->ranks == 2)
		writel(0x0303, &mctl_ctl->odtmap);
	else
		writel(0x0201, &mctl_ctl->odtmap);

	/* TODO: DDR4 */
	if (para->type == SUNXI_DRAM_TYPE_LPDDR3) {
		tmp = para->clk * 7 / 2000;
		reg_val = 0x0400;
		reg_val |= (tmp + 7) << 24;
		reg_val |= (((para->clk < 400) ? 3 : 4) - tmp) << 16;
	} else if (para->type == SUNXI_DRAM_TYPE_DDR3) {
		reg_val = 0x06000400;	/* TODO?: Use CL - CWL value in [7:0] */
	} else {
		panic("Only (LP)DDR3 supported (type = %d)\n", para->type);
	}
	writel(reg_val, &mctl_ctl->odtcfg);

	if (!para->bus_full_width) {
		writel(0x0, &mctl_phy->dx[2].gcr[0]);
		writel(0x0, &mctl_phy->dx[3].gcr[0]);
	}
}

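/*
 * Add the board-specific per-bit delay values on top of the trained ones.
 * Each BDLR register packs four 8-bit delay-line settings; PGCR0 bit 26 is
 * toggled around the read-delay update, presumably to latch the new values.
 */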
static void mctl_bit_delay_set(struct dram_para *para)
{
	struct sunxi_mctl_phy_reg * const mctl_phy =
		(struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;
	int i, j;
	u32 val;

	for (i = 0; i < 4; i++) {
		val = readl(&mctl_phy->dx[i].bdlr0);
		for (j = 0; j < 4; j++)
			val += para->dx_write_delays[i][j] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr0);

		val = readl(&mctl_phy->dx[i].bdlr1);
		for (j = 0; j < 4; j++)
			val += para->dx_write_delays[i][j + 4] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr1);

		val = readl(&mctl_phy->dx[i].bdlr2);
		for (j = 0; j < 4; j++)
			val += para->dx_write_delays[i][j + 8] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr2);
	}
	clrbits_le32(&mctl_phy->pgcr[0], BIT(26));

	for (i = 0; i < 4; i++) {
		val = readl(&mctl_phy->dx[i].bdlr3);
		for (j = 0; j < 4; j++)
			val += para->dx_read_delays[i][j] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr3);

		val = readl(&mctl_phy->dx[i].bdlr4);
		for (j = 0; j < 4; j++)
			val += para->dx_read_delays[i][j + 4] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr4);

		val = readl(&mctl_phy->dx[i].bdlr5);
		for (j = 0; j < 4; j++)
			val += para->dx_read_delays[i][j + 8] << (j * 8);
		writel(val, &mctl_phy->dx[i].bdlr5);

		val = readl(&mctl_phy->dx[i].bdlr6);
		val += (para->dx_read_delays[i][12] << 8) |
		       (para->dx_read_delays[i][13] << 16);
		writel(val, &mctl_phy->dx[i].bdlr6);
	}
	setbits_le32(&mctl_phy->pgcr[0], BIT(26));
	udelay(1);

	if (para->type != SUNXI_DRAM_TYPE_LPDDR3)
		return;

	for (i = 1; i < 14; i++) {
		val = readl(&mctl_phy->acbdlr[i]);
		val += 0x0a0a0a0a;
		writel(val, &mctl_phy->acbdlr[i]);
	}
}

static bool mctl_channel_init(struct dram_para *para)
{
	struct sunxi_mctl_com_reg * const mctl_com =
			(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
	struct sunxi_mctl_ctl_reg * const mctl_ctl =
			(struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;
	struct sunxi_mctl_phy_reg * const mctl_phy =
			(struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;
	int i;
	u32 val;

	setbits_le32(&mctl_ctl->dfiupd[0], BIT(31) | BIT(30));
	setbits_le32(&mctl_ctl->zqctl[0], BIT(31) | BIT(30));
	writel(0x2f05, &mctl_ctl->sched[0]);
	setbits_le32(&mctl_ctl->rfshctl3, BIT(0));
	setbits_le32(&mctl_ctl->dfimisc, BIT(0));
	setbits_le32(&mctl_ctl->unk_0x00c, BIT(8));
	clrsetbits_le32(&mctl_phy->pgcr[1], 0x180, 0xc0);
	/* TODO: non-LPDDR3 types */
	clrsetbits_le32(&mctl_phy->pgcr[2], GENMASK(17, 0), ns_to_t(7800));
	clrbits_le32(&mctl_phy->pgcr[6], BIT(0));
	clrsetbits_le32(&mctl_phy->dxccr, 0xee0, 0x220);
	/* TODO: VT compensation */
	clrsetbits_le32(&mctl_phy->dsgcr, BIT(0), 0x440060);
	clrbits_le32(&mctl_phy->vtcr[1], BIT(1));

	for (i = 0; i < 4; i++)
		clrsetbits_le32(&mctl_phy->dx[i].gcr[0], 0xe00, 0x800);
	for (i = 0; i < 4; i++)
		clrsetbits_le32(&mctl_phy->dx[i].gcr[2], 0xffff, 0x5555);
	for (i = 0; i < 4; i++)
		clrsetbits_le32(&mctl_phy->dx[i].gcr[3], 0x3030, 0x1010);

	udelay(100);

	if (para->ranks == 2)
		setbits_le32(&mctl_phy->dtcr[1], 0x30000);
	else
		clrsetbits_le32(&mctl_phy->dtcr[1], 0x30000, 0x10000);

	if (sunxi_dram_is_lpddr(para->type))
		clrbits_le32(&mctl_phy->dtcr[1], BIT(1));
	if (para->ranks == 2) {
		writel(0x00010001, &mctl_phy->rankidr);
		writel(0x20000, &mctl_phy->odtcr);
	} else {
		writel(0x0, &mctl_phy->rankidr);
		writel(0x10000, &mctl_phy->odtcr);
	}

	/* set bits [3:0] to 1? 0 not valid in ZynqMP d/s */
	if (para->type == SUNXI_DRAM_TYPE_LPDDR3)
		clrsetbits_le32(&mctl_phy->dtcr[0], 0xF0000000, 0x10000040);
	else
		clrsetbits_le32(&mctl_phy->dtcr[0], 0xF0000000, 0x10000000);
	if (para->clk <= 792) {
		if (para->clk <= 672) {
			if (para->clk <= 600)
				val = 0x300;
			else
				val = 0x400;
		} else {
			val = 0x500;
		}
	} else {
		val = 0x600;
	}
	/* FIXME: NOT REVIEWED YET */
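	/*
	 * The CONFIG_DRAM_ZQ value is split byte-wise between the two ZQ
	 * calibration segments below, apparently as impedance override values.
	 */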
	clrsetbits_le32(&mctl_phy->zq[0].zqcr, 0x700, val);
	clrsetbits_le32(&mctl_phy->zq[0].zqpr[0], 0xff,
			CONFIG_DRAM_ZQ & 0xff);
	clrbits_le32(&mctl_phy->zq[0].zqor[0], 0xfffff);
	setbits_le32(&mctl_phy->zq[0].zqor[0], (CONFIG_DRAM_ZQ >> 8) & 0xff);
	setbits_le32(&mctl_phy->zq[0].zqor[0], (CONFIG_DRAM_ZQ & 0xf00) - 0x100);
	setbits_le32(&mctl_phy->zq[0].zqor[0], (CONFIG_DRAM_ZQ & 0xff00) << 4);
	clrbits_le32(&mctl_phy->zq[1].zqpr[0], 0xfffff);
	setbits_le32(&mctl_phy->zq[1].zqpr[0], (CONFIG_DRAM_ZQ >> 16) & 0xff);
	setbits_le32(&mctl_phy->zq[1].zqpr[0], ((CONFIG_DRAM_ZQ >> 8) & 0xf00) - 0x100);
	setbits_le32(&mctl_phy->zq[1].zqpr[0], (CONFIG_DRAM_ZQ & 0xff0000) >> 4);
	if (para->type == SUNXI_DRAM_TYPE_LPDDR3) {
		for (i = 1; i < 14; i++)
			writel(0x06060606, &mctl_phy->acbdlr[i]);
	}

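	/*
	 * Trigger the PHY init sequence: ZQ calibration, DRAM initialisation
	 * and read/write training; DDR3 additionally needs a DRAM reset and
	 * write leveling.
	 */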
	val = PIR_ZCAL | PIR_DCAL | PIR_PHYRST | PIR_DRAMINIT | PIR_QSGATE |
	      PIR_RDDSKW | PIR_WRDSKW | PIR_RDEYE | PIR_WREYE;
	if (para->type == SUNXI_DRAM_TYPE_DDR3)
		val |= PIR_DRAMRST | PIR_WL;
	mctl_phy_pir_init(val);

	/* TODO: DDR4 types ? */
	for (i = 0; i < 4; i++)
		writel(0x00000909, &mctl_phy->dx[i].gcr[5]);

	for (i = 0; i < 4; i++) {
		if (IS_ENABLED(CONFIG_DRAM_ODT_EN))
			val = 0x0;
		else
			val = 0xaaaa;
		clrsetbits_le32(&mctl_phy->dx[i].gcr[2], 0xffff, val);

		if (IS_ENABLED(CONFIG_DRAM_ODT_EN))
			val = 0x0;
		else
			val = 0x2020;
		clrsetbits_le32(&mctl_phy->dx[i].gcr[3], 0x3030, val);
	}

	mctl_bit_delay_set(para);
	udelay(1);

	setbits_le32(&mctl_phy->pgcr[6], BIT(0));
	clrbits_le32(&mctl_phy->pgcr[6], 0xfff8);
	for (i = 0; i < 4; i++)
		clrbits_le32(&mctl_phy->dx[i].gcr[3], ~0x3ffff);
	udelay(10);

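	/* Any error flag in PGSR0 means training failed for this configuration. */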
	if (readl(&mctl_phy->pgsr[0]) & 0xff00000) {
		/* Oops! There's something wrong! */
		debug("PLL = %x\n", readl(0x3001010));
		debug("DRAM PHY PGSR0 = %x\n", readl(&mctl_phy->pgsr[0]));
		for (i = 0; i < 4; i++)
			debug("DRAM PHY DX%dRSR0 = %x\n", i, readl(&mctl_phy->dx[i].rsr[0]));
		debug("Error while initializing DRAM PHY!\n");

		return false;
	}

	if (sunxi_dram_is_lpddr(para->type))
		clrsetbits_le32(&mctl_phy->dsgcr, 0xc0, 0x40);
	clrbits_le32(&mctl_phy->pgcr[1], 0x40);
	clrbits_le32(&mctl_ctl->dfimisc, BIT(0));
	writel(1, &mctl_ctl->swctl);
	mctl_await_completion(&mctl_ctl->swstat, 1, 1);
	clrbits_le32(&mctl_ctl->rfshctl3, BIT(0));

	setbits_le32(&mctl_com->unk_0x014, BIT(31));
	writel(0xffffffff, &mctl_com->maer0);
	writel(0x7ff, &mctl_com->maer1);
	writel(0xffff, &mctl_com->maer2);

	return true;
}

static void mctl_auto_detect_rank_width(struct dram_para *para)
{
	/* this is the minimum size that is supported */
	para->cols = 8;
	para->rows = 13;

	/*
	 * Previous versions of this driver tried to auto-detect the rank
	 * and width by looking at controller registers. However, this proved
	 * to be unreliable, so the approach here is the more robust
	 * solution. Check the git history for details.
	 *
	 * The strategy is to test the most demanding combination first and
	 * the least demanding last; otherwise the HW might not be fully
	 * utilized. For example, the half bus width and rank = 1 combination
	 * would also work on HW with full bus width and rank = 2, but only
	 * 1/4 of the RAM would be visible.
	 */

	debug("testing 32-bit width, rank = 2\n");
	para->bus_full_width = 1;
	para->ranks = 2;
	if (mctl_core_init(para))
		return;

	debug("testing 32-bit width, rank = 1\n");
	para->bus_full_width = 1;
	para->ranks = 1;
	if (mctl_core_init(para))
		return;

	debug("testing 16-bit width, rank = 2\n");
	para->bus_full_width = 0;
	para->ranks = 2;
	if (mctl_core_init(para))
		return;

	debug("testing 16-bit width, rank = 1\n");
	para->bus_full_width = 0;
	para->ranks = 1;
	if (mctl_core_init(para))
		return;

	panic("This DRAM setup is currently not supported.\n");
}

static void mctl_auto_detect_dram_size(struct dram_para *para)
{
	/* TODO: non-(LP)DDR3 */

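	/*
	 * Configure the controller for the largest geometry first, then look
	 * for the first address bit that aliases back to the start of DRAM
	 * (mctl_mem_matches()) to find the real row/column count.
	 */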
	/* detect row address bits */
	para->cols = 8;
	para->rows = 18;
	mctl_core_init(para);

	for (para->rows = 13; para->rows < 18; para->rows++) {
		/* 8 banks, 8 bits per byte and 16/32 bit width */
		if (mctl_mem_matches((1 << (para->rows + para->cols +
					    4 + para->bus_full_width))))
			break;
	}

	/* detect column address bits */
	para->cols = 11;
	mctl_core_init(para);

	for (para->cols = 8; para->cols < 11; para->cols++) {
		/* 8 bits per byte and 16/32 bit width */
		if (mctl_mem_matches(1 << (para->cols + 1 +
					   para->bus_full_width)))
			break;
	}
}

unsigned long mctl_calc_size(struct dram_para *para)
{
	u8 width = para->bus_full_width ? 4 : 2;

	/* TODO: non-(LP)DDR3 */

	/* 8 banks */
	return (1ULL << (para->cols + para->rows + 3)) * width * para->ranks;
}

#define SUN50I_H6_LPDDR3_DX_WRITE_DELAYS			\
	{{  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  4,  4,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 }}
#define SUN50I_H6_LPDDR3_DX_READ_DELAYS					\
	{{  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 },	\
	 {  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 },	\
	 {  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 },	\
	 {  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 }}

#define SUN50I_H6_DDR3_DX_WRITE_DELAYS				\
	{{  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 }}
#define SUN50I_H6_DDR3_DX_READ_DELAYS					\
	{{  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  4,  4,  4,  4,  4,  4,  4,  4,  4,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 },	\
	 {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 }}

unsigned long sunxi_dram_init(void)
{
	struct sunxi_mctl_com_reg * const mctl_com =
			(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
	struct sunxi_prcm_reg *const prcm =
		(struct sunxi_prcm_reg *)SUNXI_PRCM_BASE;
	struct dram_para para = {
		.clk = CONFIG_DRAM_CLK,
#ifdef CONFIG_SUNXI_DRAM_H6_LPDDR3
		.type = SUNXI_DRAM_TYPE_LPDDR3,
		.dx_read_delays  = SUN50I_H6_LPDDR3_DX_READ_DELAYS,
		.dx_write_delays = SUN50I_H6_LPDDR3_DX_WRITE_DELAYS,
#elif defined(CONFIG_SUNXI_DRAM_H6_DDR3_1333)
		.type = SUNXI_DRAM_TYPE_DDR3,
		.dx_read_delays  = SUN50I_H6_DDR3_DX_READ_DELAYS,
		.dx_write_delays = SUN50I_H6_DDR3_DX_WRITE_DELAYS,
#endif
	};

	unsigned long size;

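	/*
	 * Presumably enable the resistor calibration circuit and clear the
	 * manual 240 ohm resistor setting in the PRCM.
	 */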
	setbits_le32(&prcm->res_cal_ctrl, BIT(8));
	clrbits_le32(&prcm->ohms240, 0x3f);

	mctl_auto_detect_rank_width(&para);
	mctl_auto_detect_dram_size(&para);

	mctl_core_init(&para);

	size = mctl_calc_size(&para);

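	/* Apparently records the detected DRAM size in mctl_com->cr bits [7:4]. */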
	clrsetbits_le32(&mctl_com->cr, 0xf0, (size >> (10 + 10 + 4)) & 0xf0);

	mctl_set_master_priority();

	return size;
}