blob: 8d1b9faacc01c39677317a30d5faf2de783921a1 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0
Philipp Tomsichd21a4d82017-06-23 00:12:05 +02002/*
3 * (C) Copyright 2017 Theobroma Systems Design und Consulting GmbH
Philipp Tomsichd21a4d82017-06-23 00:12:05 +02004 */
5
6#include <common.h>
7#include <clk.h>
8#include <dm.h>
9#include <dt-bindings/memory/rk3368-dmc.h>
10#include <dt-structs.h>
11#include <ram.h>
12#include <regmap.h>
13#include <syscon.h>
14#include <asm/io.h>
15#include <asm/arch/clock.h>
16#include <asm/arch/cru_rk3368.h>
17#include <asm/arch/grf_rk3368.h>
18#include <asm/arch/ddr_rk3368.h>
19#include <asm/arch/sdram.h>
20#include <asm/arch/sdram_common.h>
21
/* Per-device private state for the RK3368 DMC driver. */
struct dram_info {
	struct ram_info info;		/* base/size reported via RAM uclass */
	struct clk ddr_clk;		/* DDR clock (set to 2x DRAM frequency) */
	struct rk3368_cru *cru;		/* clock & reset unit */
	struct rk3368_grf *grf;		/* general register files */
	struct rk3368_ddr_pctl *pctl;	/* DDR protocol controller */
	struct rk3368_ddrphy *phy;	/* DDR PHY */
	struct rk3368_pmu_grf *pmugrf;	/* PMU GRF (holds OS_REG with DRAM info) */
	struct rk3368_msch *msch;	/* memory scheduler (NIU) */
};
32
/* Platform data for the DMC, partly populated from DT/of-platdata. */
struct rk3368_sdram_params {
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct dtd_rockchip_rk3368_dmc of_plat;	/* raw of-platdata */
#endif
	struct rk3288_sdram_pctl_timing pctl_timing;	/* PCTL timing set */
	u32 trefi_mem_ddr3;	/* refresh interval in tCK (PCTL TREFI_MEM_DDR3) */
	struct rk3288_sdram_channel chan;	/* detected channel geometry */
	struct regmap *map;	/* register map from DT (non-of-platdata only) */
	u32 ddr_freq;		/* target DRAM frequency in Hz */
	u32 memory_schedule;	/* address-mapping scheme (DMC_MSCH_*) */
	u32 ddr_speed_bin;	/* DDR3 speed bin (only DDR3_1600K supported) */
	u32 tfaw_mult;		/* tFAW as a multiple of tRRD (TFAW_TRRD_MULTx) */
};
46
/* PCTL bits */
enum {
	/* PCTL_DFISTCFG0 */
	DFI_INIT_START = BIT(0),
	DFI_DATA_BYTE_DISABLE_EN = BIT(2),

	/* PCTL_DFISTCFG1 */
	DFI_DRAM_CLK_SR_EN = BIT(0),
	DFI_DRAM_CLK_DPD_EN = BIT(1),
	ODT_LEN_BL8_W_SHIFT = 16,

	/* PCTL_DFISTCFG2 */
	DFI_PARITY_INTR_EN = BIT(0),
	DFI_PARITY_EN = BIT(1),

	/* PCTL_DFILPCFG0 */
	TLP_RESP_TIME_SHIFT = 16,
	LP_SR_EN = BIT(8),
	LP_PD_EN = BIT(0),

	/* PCTL_DFIODTCFG */
	RANK0_ODT_WRITE_SEL = BIT(3),
	RANK1_ODT_WRITE_SEL = BIT(11),

	/* PCTL_SCFG */
	HW_LOW_POWER_EN = BIT(0),

	/*
	 * PCTL_MCMD: command opcodes occupy the low bits; rank-select
	 * and the 'start' flag are OR-ed on top when issuing a command.
	 */
	START_CMD = BIT(31),
	MCMD_RANK0 = BIT(20),
	MCMD_RANK1 = BIT(21),
	DESELECT_CMD = 0,
	PREA_CMD,
	REF_CMD,
	MRS_CMD,
	ZQCS_CMD,
	ZQCL_CMD,
	RSTL_CMD,
	MRR_CMD = 8,
	DPDE_CMD,

	/* PCTL_POWCTL */
	POWER_UP_START = BIT(0),

	/* PCTL_POWSTAT */
	POWER_UP_DONE = BIT(0),

	/* PCTL_SCTL: state-transition commands for the PCTL state machine */
	INIT_STATE = 0,
	CFG_STATE,
	GO_STATE,
	SLEEP_STATE,
	WAKEUP_STATE,

	/* PCTL_STAT: current-state field and low-power trigger field */
	LP_TRIG_SHIFT = 4,
	LP_TRIG_MASK = 7,
	PCTL_STAT_MSK = 7,
	INIT_MEM = 0,
	CONFIG,
	CONFIG_REQ,
	ACCESS,
	ACCESS_REQ,
	LOW_POWER,
	LOW_POWER_ENTRY_REQ,
	LOW_POWER_EXIT_REQ,

	/* PCTL_MCFG */
	DDR2_DDR3_BL_8 = BIT(0),
	DDR3_EN = BIT(5),
	TFAW_TRRD_MULT4 = (0 << 18),
	TFAW_TRRD_MULT5 = (1 << 18),
	TFAW_TRRD_MULT6 = (2 << 18),
};
121
/*
 * DDR3 mode-register field encodings (see JESD79-3, "Mode Register MR0/1/2").
 * Arguments are in clock cycles (tCK).
 */
/* MR0 write-recovery: 5..8 encode as n-4; 10/12/14/16 encode as n/2 (mod 8) */
#define DDR3_MR0_WR(n) \
	((n <= 8) ? ((n - 4) << 9) : (((n >> 1) & 0x7) << 9))
/* MR0 CAS latency: low 3 bits at A4-A6, high bit at A2 */
#define DDR3_MR0_CL(n) \
	((((n - 4) & 0x7) << 4) | (((n - 4) & 0x8) >> 2))
/* MR0 burst length 8 (fixed) */
#define DDR3_MR0_BL8 \
	(0 << 0)
/* MR0 DLL reset */
#define DDR3_MR0_DLL_RESET \
	(1 << 8)
/* MR1: Rtt_Nom = RZQ/2 = 120 ohm (A9=0, A6=1, A2=0) */
#define DDR3_MR1_RTT120OHM \
	((0 << 9) | (1 << 6) | (0 << 2))
/* MR2 CAS write latency: encoded as n-5 at A3-A5 */
#define DDR3_MR2_TWL(n) \
	(((n - 5) & 0x7) << 3)
134
135
136#ifdef CONFIG_TPL_BUILD
137
138static void ddr_set_noc_spr_err_stall(struct rk3368_grf *grf, bool enable)
139{
140 if (enable)
141 rk_setreg(&grf->ddrc0_con0, NOC_RSP_ERR_STALL);
142 else
143 rk_clrreg(&grf->ddrc0_con0, NOC_RSP_ERR_STALL);
144}
145
146static void ddr_set_ddr3_mode(struct rk3368_grf *grf, bool ddr3_mode)
147{
148 if (ddr3_mode)
149 rk_setreg(&grf->ddrc0_con0, MSCH0_MAINDDR3_DDR3);
150 else
151 rk_clrreg(&grf->ddrc0_con0, MSCH0_MAINDDR3_DDR3);
152}
153
/*
 * Configure the DDR PHY for DDR3 operation: mode selection, the CL/AL/CWL
 * latencies, drive strength and NRCOMP/PRCOMP calibration values.
 * Register indices and magic values follow the RK3368 TRM (DDRPHY chapter).
 */
static void ddrphy_config(struct rk3368_ddrphy *phy,
			  u32 tcl, u32 tal, u32 tcwl)
{
	int i;

	/* Set to DDR3 mode (clear the 2-bit mode field in reg 1) */
	clrsetbits_le32(&phy->reg[1], 0x3, 0x0);

	/* DDRPHY_REGB: CL in the upper nibble, AL in the lower nibble */
	clrsetbits_le32(&phy->reg[0xb], 0xff, tcl << 4 | tal);
	/* DDRPHY_REGC: CWL */
	clrsetbits_le32(&phy->reg[0xc], 0x0f, tcwl);

	/* Update drive-strength */
	writel(0xcc, &phy->reg[0x11]);
	writel(0xaa, &phy->reg[0x16]);
	/*
	 * Update NRCOMP/PRCOMP for all 4 channels (for details of all
	 * affected registers refer to the documentation of DDRPHY_REG20
	 * and DDRPHY_REG21 in the RK3368 TRM).
	 */
	for (i = 0; i < 4; ++i) {
		writel(0xcc, &phy->reg[0x20 + i * 0x10]);
		writel(0x44, &phy->reg[0x21 + i * 0x10]);
	}

	/* Enable write-leveling calibration bypass */
	setbits_le32(&phy->reg[2], BIT(3));
}
183
184static void copy_to_reg(u32 *dest, const u32 *src, u32 n)
185{
186 int i;
187
188 for (i = 0; i < n / sizeof(u32); i++)
189 writel(*src++, dest++);
190}
191
/*
 * Issue a PCTL memory command (DESELECT/PREA/ZQCL/...) to the ranks
 * selected by @rank (MCMD_RANK0/MCMD_RANK1) and busy-wait until the
 * controller clears the START_CMD bit, signalling completion.
 * NOTE(review): the spin loop has no timeout — a hung controller would
 * hang the boot here; confirm this is acceptable at this boot stage.
 */
static void send_command(struct rk3368_ddr_pctl *pctl, u32 rank, u32 cmd)
{
	u32 mcmd = START_CMD | cmd | rank;

	debug("%s: writing %x to MCMD\n", __func__, mcmd);
	writel(mcmd, &pctl->mcmd);
	while (readl(&pctl->mcmd) & START_CMD)
		/* spin */;
}
201
/*
 * Write mode-register @mr_num with value @mr_data on the selected rank(s)
 * via a PCTL MRS command, then busy-wait for completion (START_CMD clears).
 * MCMD layout: mode-register number at bits 17+, register value at bits 4+.
 */
static void send_mrs(struct rk3368_ddr_pctl *pctl,
		     u32 rank, u32 mr_num, u32 mr_data)
{
	u32 mcmd = START_CMD | MRS_CMD | rank | (mr_num << 17) | (mr_data << 4);

	debug("%s: writing %x to MCMD\n", __func__, mcmd);
	writel(mcmd, &pctl->mcmd);
	while (readl(&pctl->mcmd) & START_CMD)
		/* spin */;
}
212
/*
 * Power up the DRAM and program mode-registers MR0..MR3, following the
 * DDR3 initialization sequence of the RK3368 TRM (section 16.6.2).
 *
 * @pctl:   DDR protocol controller
 * @params: timing parameters (twr/tcl/tcwl are consumed here)
 * Return: 0 on success, -ETIME if the power-up handshake times out.
 */
static int memory_init(struct rk3368_ddr_pctl *pctl,
		       struct rk3368_sdram_params *params)
{
	u32 mr[4];
	const ulong timeout_ms = 500;
	ulong tmp;

	/*
	 * Power up DRAM by DDR_PCTL_POWCTL[0] register of PCTL and
	 * wait power up DRAM finish with DDR_PCTL_POWSTAT[0] register
	 * of PCTL.
	 */
	writel(POWER_UP_START, &pctl->powctl);

	tmp = get_timer(0);
	do {
		if (get_timer(tmp) > timeout_ms) {
			pr_err("%s: POWER_UP_START did not complete in %ld ms\n",
			       __func__, timeout_ms);
			return -ETIME;
		}
	} while (!(readl(&pctl->powstat) & POWER_UP_DONE));

	/* Configure MR0 through MR3 */
	mr[0] = DDR3_MR0_WR(params->pctl_timing.twr) |
		DDR3_MR0_CL(params->pctl_timing.tcl) |
		DDR3_MR0_DLL_RESET;
	mr[1] = DDR3_MR1_RTT120OHM;
	mr[2] = DDR3_MR2_TWL(params->pctl_timing.tcwl);
	mr[3] = 0;

	/*
	 * Also see RK3368 Technical Reference Manual:
	 *   "16.6.2 Initialization (DDR3 Initialization Sequence)"
	 * Mode registers are written in the order MR2, MR3, MR1, MR0
	 * (per JESD79-3), followed by a long ZQ calibration.
	 */
	send_command(pctl, MCMD_RANK0 | MCMD_RANK1, DESELECT_CMD);
	udelay(1);
	send_command(pctl, MCMD_RANK0 | MCMD_RANK1, PREA_CMD);
	send_mrs(pctl, MCMD_RANK0 | MCMD_RANK1, 2, mr[2]);
	send_mrs(pctl, MCMD_RANK0 | MCMD_RANK1, 3, mr[3]);
	send_mrs(pctl, MCMD_RANK0 | MCMD_RANK1, 1, mr[1]);
	send_mrs(pctl, MCMD_RANK0 | MCMD_RANK1, 0, mr[0]);
	send_command(pctl, MCMD_RANK0 | MCMD_RANK1, ZQCL_CMD);

	return 0;
}
259
/*
 * Drive the PCTL state machine into the CONFIG state, from whatever
 * state it is currently in (LOW_POWER must be woken to ACCESS first).
 */
static void move_to_config_state(struct rk3368_ddr_pctl *pctl)
{
	/*
	 * Also see RK3368 Technical Reference Manual:
	 *   "16.6.1 State transition of PCTL (Moving to Config State)"
	 */
	u32 state = readl(&pctl->stat) & PCTL_STAT_MSK;

	switch (state) {
	case LOW_POWER:
		/* Wake up into ACCESS first, then drop into CONFIG below */
		writel(WAKEUP_STATE, &pctl->sctl);
		while ((readl(&pctl->stat) & PCTL_STAT_MSK) != ACCESS)
			/* spin */;

		/* fall-through */
	case ACCESS:
	case INIT_MEM:
		writel(CFG_STATE, &pctl->sctl);
		while ((readl(&pctl->stat) & PCTL_STAT_MSK) != CONFIG)
			/* spin */;
		break;

	case CONFIG:
		/* already there */
		return;

	default:
		break;
	}
}
289
/*
 * Drive the PCTL state machine into the ACCESS state, traversing the
 * intermediate states (LOW_POWER -> wakeup, INIT_MEM -> CONFIG -> GO)
 * as required by the TRM.
 */
static void move_to_access_state(struct rk3368_ddr_pctl *pctl)
{
	/*
	 * Also see RK3368 Technical Reference Manual:
	 *   "16.6.1 State transition of PCTL (Moving to Access State)"
	 */
	u32 state = readl(&pctl->stat) & PCTL_STAT_MSK;

	switch (state) {
	case LOW_POWER:
		/*
		 * A hardware low-power trigger (LP_TRIG == 1) cannot be
		 * exited by software; leave the state machine alone.
		 */
		if (((readl(&pctl->stat) >> LP_TRIG_SHIFT) &
		     LP_TRIG_MASK) == 1)
			return;

		writel(WAKEUP_STATE, &pctl->sctl);
		while ((readl(&pctl->stat) & PCTL_STAT_MSK) != ACCESS)
			/* spin */;

		/* fall-through */
	case INIT_MEM:
		writel(CFG_STATE, &pctl->sctl);
		while ((readl(&pctl->stat) & PCTL_STAT_MSK) != CONFIG)
			/* spin */;

		/* fall-through */
	case CONFIG:
		writel(GO_STATE, &pctl->sctl);
		while ((readl(&pctl->stat) & PCTL_STAT_MSK) == CONFIG)
			/* spin */;
		break;

	case ACCESS:
		/* already there */
		return;

	default:
		break;
	}
}
328
/*
 * Pulse the soft-resets of the DDR controller (PCTL) and PHY via
 * CRU softrst_con[10]; bits 3:2 reset the controller, bits 1:0 the PHY.
 */
static void ddrctl_reset(struct rk3368_cru *cru)
{
	const u32 ctl_reset = BIT(3) | BIT(2);
	const u32 phy_reset = BIT(1) | BIT(0);

	/*
	 * The PHY reset should be released before the PCTL reset.
	 *
	 * Note that the following sequence (including the number of
	 * us to delay between releasing the PHY and PCTL reset) has
	 * been adapted per feedback received from Rockchips, so do
	 * not try to optimise.
	 */
	rk_setreg(&cru->softrst_con[10], ctl_reset | phy_reset);
	udelay(1);
	rk_clrreg(&cru->softrst_con[10], phy_reset);
	udelay(5);
	rk_clrreg(&cru->softrst_con[10], ctl_reset);
}
348
/*
 * Reset the DDR PHY through its own register 0: assert both reset bits
 * (2 and 3), then release them in sequence with the required settle time.
 */
static void ddrphy_reset(struct rk3368_ddrphy *ddrphy)
{
	/*
	 * The analog part of the PHY should be released at least 1000
	 * DRAM cycles before the digital part of the PHY (waiting for
	 * 5us will ensure this for a DRAM clock as low as 200MHz).
	 */
	clrbits_le32(&ddrphy->reg[0], BIT(3) | BIT(2));
	udelay(1);
	setbits_le32(&ddrphy->reg[0], BIT(2));
	udelay(5);
	setbits_le32(&ddrphy->reg[0], BIT(3));
}
362
/*
 * Program frequency-dependent PHY delay settings: per-byte-lane DLL
 * bypass controls and the DQS DLL phase delay. Register indices follow
 * the RK3368 TRM; the thresholds (400MHz / 681MHz) are vendor-provided.
 */
static void ddrphy_config_delays(struct rk3368_ddrphy *ddrphy, u32 freq)
{
	u32 dqs_dll_delay;

	/* Same bit pattern applied to the command lane and 4 data lanes */
	setbits_le32(&ddrphy->reg[0x13], BIT(4));
	clrbits_le32(&ddrphy->reg[0x14], BIT(3));

	setbits_le32(&ddrphy->reg[0x26], BIT(4));
	clrbits_le32(&ddrphy->reg[0x27], BIT(3));

	setbits_le32(&ddrphy->reg[0x36], BIT(4));
	clrbits_le32(&ddrphy->reg[0x37], BIT(3));

	setbits_le32(&ddrphy->reg[0x46], BIT(4));
	clrbits_le32(&ddrphy->reg[0x47], BIT(3));

	setbits_le32(&ddrphy->reg[0x56], BIT(4));
	clrbits_le32(&ddrphy->reg[0x57], BIT(3));

	/* Low-frequency operation uses different reg 0xa4 settings */
	if (freq <= 400000000)
		setbits_le32(&ddrphy->reg[0xa4], 0x1f);
	else
		clrbits_le32(&ddrphy->reg[0xa4], 0x1f);

	if (freq < 681000000)
		dqs_dll_delay = 3; /* 67.5 degree delay */
	else
		dqs_dll_delay = 2; /* 45 degree delay */

	/* One DQS DLL delay register per byte lane */
	writel(dqs_dll_delay, &ddrphy->reg[0x28]);
	writel(dqs_dll_delay, &ddrphy->reg[0x38]);
	writel(dqs_dll_delay, &ddrphy->reg[0x48]);
	writel(dqs_dll_delay, &ddrphy->reg[0x58]);
}
397
/*
 * Configure the DFI (DDR PHY Interface) portion of the PCTL and start
 * DFI initialization, waiting for the PHY to report init-complete.
 * Return: 0 on success, -ETIME if dfiststat0 bit 0 never sets.
 */
static int dfi_cfg(struct rk3368_ddr_pctl *pctl)
{
	const ulong timeout_ms = 200;
	ulong tmp;

	writel(DFI_DATA_BYTE_DISABLE_EN, &pctl->dfistcfg0);

	writel(DFI_DRAM_CLK_SR_EN | DFI_DRAM_CLK_DPD_EN,
	       &pctl->dfistcfg1);
	writel(DFI_PARITY_INTR_EN | DFI_PARITY_EN, &pctl->dfistcfg2);
	writel(7 << TLP_RESP_TIME_SHIFT | LP_SR_EN | LP_PD_EN,
	       &pctl->dfilpcfg0);

	writel(1, &pctl->dfitphyupdtype0);

	writel(0x1f, &pctl->dfitphyrdlat);
	writel(0, &pctl->dfitphywrdata);
	writel(0, &pctl->dfiupdcfg); /* phyupd and ctrlupd disabled */

	/* Kick off DFI initialization and poll for completion */
	setbits_le32(&pctl->dfistcfg0, DFI_INIT_START);

	tmp = get_timer(0);
	do {
		if (get_timer(tmp) > timeout_ms) {
			pr_err("%s: DFI init did not complete within %ld ms\n",
			       __func__, timeout_ms);
			return -ETIME;
		}
	} while ((readl(&pctl->dfiststat0) & 1) == 0);

	return 0;
}
430
431static inline u32 ps_to_tCK(const u32 ps, const ulong freq)
432{
433 const ulong MHz = 1000000;
434 return DIV_ROUND_UP(ps * freq, 1000000 * MHz);
435}
436
437static inline u32 ns_to_tCK(const u32 ns, const ulong freq)
438{
439 return ps_to_tCK(ns * 1000, freq);
440}
441
442static inline u32 tCK_to_ps(const ulong tCK, const ulong freq)
443{
444 const ulong MHz = 1000000;
445 return DIV_ROUND_UP(tCK * 1000000 * MHz, freq);
446}
447
/*
 * Compute the PCTL timing register set for a DDR3-1600K device running
 * at @freq (Hz). All derived values are in controller clock cycles (tCK)
 * unless the field is defined otherwise by the PCTL (see RK3368 TRM).
 * Return: 0 on success, -1 for an unsupported speed bin.
 */
static int pctl_calc_timings(struct rk3368_sdram_params *params,
			     ulong freq)
{
	struct rk3288_sdram_pctl_timing *pctl_timing = &params->pctl_timing;
	const ulong MHz = 1000000;
	u32 tccd;
	u32 tfaw_as_ps;

	if (params->ddr_speed_bin != DDR3_1600K) {
		pr_err("%s: unimplemented DDR3 speed bin %d\n",
		       __func__, params->ddr_speed_bin);
		return -1;
	}

	/* PCTL is clocked at 1/2 the DRAM clock; err on the side of caution */
	pctl_timing->togcnt1u = DIV_ROUND_UP(freq, 2 * MHz);
	pctl_timing->togcnt100n = DIV_ROUND_UP(freq / 10, 2 * MHz);

	pctl_timing->tinit = 200; /* 200 usec */
	pctl_timing->trsth = 500; /* 500 usec */
	pctl_timing->trefi = 78; /* 7.8usec = 78 * 100ns */
	params->trefi_mem_ddr3 = ns_to_tCK(pctl_timing->trefi * 100, freq);

	/*
	 * CL/CWL selection by frequency band.
	 * NOTE(review): tcwl=10 for freq <= 400MHz looks unusual next to
	 * the other bands (JESD79-3 specifies CWL=5 for DDR3-800);
	 * confirm against the TRM / vendor reference code.
	 */
	if (freq <= (400 * MHz)) {
		pctl_timing->tcl = 6;
		pctl_timing->tcwl = 10;
	} else if (freq <= (533 * MHz)) {
		pctl_timing->tcl = 8;
		pctl_timing->tcwl = 6;
	} else if (freq <= (666 * MHz)) {
		pctl_timing->tcl = 10;
		pctl_timing->tcwl = 7;
	} else {
		pctl_timing->tcl = 11;
		pctl_timing->tcwl = 8;
	}

	pctl_timing->tmrd = 4; /* 4 tCK (all speed bins) */
	pctl_timing->trfc = ns_to_tCK(350, freq); /* tRFC: 350 (max) @ 8GBit */
	pctl_timing->trp = max(4u, ps_to_tCK(13750, freq));
	/*
	 * JESD-79:
	 * READ to WRITE Command Delay = RL + tCCD / 2 + 2tCK - WL
	 */
	tccd = 4;
	pctl_timing->trtw = pctl_timing->tcl + tccd/2 + 2 - pctl_timing->tcwl;
	pctl_timing->tal = 0;
	pctl_timing->tras = ps_to_tCK(35000, freq);
	pctl_timing->trc = ps_to_tCK(48750, freq);
	pctl_timing->trcd = ps_to_tCK(13750, freq);
	pctl_timing->trrd = max(4u, ps_to_tCK(7500, freq));
	pctl_timing->trtp = max(4u, ps_to_tCK(7500, freq));
	pctl_timing->twr = ps_to_tCK(15000, freq);
	/* The DDR3 mode-register does only support even values for tWR > 8. */
	if (pctl_timing->twr > 8)
		pctl_timing->twr = (pctl_timing->twr + 1) & ~1;
	pctl_timing->twtr = max(4u, ps_to_tCK(7500, freq));
	pctl_timing->texsr = 512; /* tEXSR(max) is tDLLLK */
	pctl_timing->txp = max(3u, ps_to_tCK(6000, freq));
	pctl_timing->txpdll = max(10u, ps_to_tCK(24000, freq));
	pctl_timing->tzqcs = max(64u, ps_to_tCK(80000, freq));
	pctl_timing->tzqcsi = 10000; /* as used by Rockchip */
	pctl_timing->tdqs = 1; /* fixed for DDR3 */
	pctl_timing->tcksre = max(5u, ps_to_tCK(10000, freq));
	pctl_timing->tcksrx = max(5u, ps_to_tCK(10000, freq));
	pctl_timing->tcke = max(3u, ps_to_tCK(5000, freq));
	pctl_timing->tmod = max(12u, ps_to_tCK(15000, freq));
	pctl_timing->trstl = ns_to_tCK(100, freq);
	pctl_timing->tzqcl = max(256u, ps_to_tCK(320000, freq)); /* tZQoper */
	pctl_timing->tmrr = 0;
	pctl_timing->tckesr = pctl_timing->tcke + 1; /* JESD-79: tCKE + 1tCK */
	pctl_timing->tdpd = 0; /* RK3368 TRM: "allowed values for DDR3: 0" */

	/*
	 * The controller can represent tFAW as 4x, 5x or 6x tRRD only.
	 * We want to use the smallest multiplier that satisfies the tFAW
	 * requirements of the given speed-bin. If necessary, we stretch out
	 * tRRD to allow us to operate on a 6x multiplier for tFAW.
	 */
	tfaw_as_ps = 40000; /* 40ns: tFAW for DDR3-1600K, 2KB page-size */
	if (tCK_to_ps(pctl_timing->trrd * 6, freq) < tfaw_as_ps) {
		/* If tFAW is > 6 x tRRD, we need to stretch tRRD */
		pctl_timing->trrd = ps_to_tCK(DIV_ROUND_UP(40000, 6), freq);
		params->tfaw_mult = TFAW_TRRD_MULT6;
	} else if (tCK_to_ps(pctl_timing->trrd * 5, freq) < tfaw_as_ps) {
		/* 5x tRRD falls short, so 6x is the smallest that fits */
		params->tfaw_mult = TFAW_TRRD_MULT6;
	} else if (tCK_to_ps(pctl_timing->trrd * 4, freq) < tfaw_as_ps) {
		params->tfaw_mult = TFAW_TRRD_MULT5;
	} else {
		params->tfaw_mult = TFAW_TRRD_MULT4;
	}

	return 0;
}
543
/*
 * Program the PCTL with the previously calculated timings, ODT settings,
 * DFI latencies and the DDR3 memory configuration.
 */
static void pctl_cfg(struct rk3368_ddr_pctl *pctl,
		     struct rk3368_sdram_params *params,
		     struct rk3368_grf *grf)
{
	/* Configure PCTL timing registers */
	params->pctl_timing.trefi |= BIT(31); /* see PCTL_TREFI */
	/* Bulk-copy the whole timing struct into the register block
	 * starting at togcnt1u (layout matches the struct). */
	copy_to_reg(&pctl->togcnt1u, &params->pctl_timing.togcnt1u,
		    sizeof(params->pctl_timing));
	writel(params->trefi_mem_ddr3, &pctl->trefi_mem_ddr3);

	/* Set up ODT write selector and ODT write length */
	writel((RANK0_ODT_WRITE_SEL | RANK1_ODT_WRITE_SEL), &pctl->dfiodtcfg);
	writel(7 << ODT_LEN_BL8_W_SHIFT, &pctl->dfiodtcfg1);

	/* Set up the CL/CWL-dependent timings of DFI */
	writel((params->pctl_timing.tcl - 1) / 2 - 1, &pctl->dfitrddataen);
	writel((params->pctl_timing.tcwl - 1) / 2 - 1, &pctl->dfitphywrlat);

	/* DDR3: BL8, DDR3 mode, and the selected tFAW/tRRD multiplier */
	writel(params->tfaw_mult | DDR3_EN | DDR2_DDR3_BL_8, &pctl->mcfg);
	/* Magic value per vendor init sequence — not decoded here */
	writel(0x001c0004, &grf->ddrc0_con0);

	setbits_le32(&pctl->scfg, HW_LOW_POWER_EN);
}
568
/*
 * Run the PHY's read-gate data training. Auto-refresh is suspended during
 * training (TREFI interval set to 0) and restored afterwards; BIT(31) is
 * the TREFI update bit (see pctl_cfg()).
 * Return: 0 on success, -ETIME if the per-lane done flags never set.
 */
static int ddrphy_data_training(struct rk3368_ddr_pctl *pctl,
				struct rk3368_ddrphy *ddrphy)
{
	const u32 trefi = readl(&pctl->trefi);
	const ulong timeout_ms = 500;
	ulong tmp;

	/* disable auto-refresh */
	writel(0 | BIT(31), &pctl->trefi);

	/* Pulse the training-trigger field in PHY reg 2 */
	clrsetbits_le32(&ddrphy->reg[2], 0x33, 0x20);
	clrsetbits_le32(&ddrphy->reg[2], 0x33, 0x21);

	/* Wait for all 4 byte lanes to report training done (reg 0xff) */
	tmp = get_timer(0);
	do {
		if (get_timer(tmp) > timeout_ms) {
			pr_err("%s: did not complete within %ld ms\n",
			       __func__, timeout_ms);
			return -ETIME;
		}
	} while ((readl(&ddrphy->reg[0xff]) & 0xf) != 0xf);

	send_command(pctl, MCMD_RANK0 | MCMD_RANK1, PREA_CMD);
	clrsetbits_le32(&ddrphy->reg[2], 0x33, 0x20);
	/* resume auto-refresh */
	writel(trefi | BIT(31), &pctl->trefi);

	return 0;
}
598
/*
 * Detect the number of column and row address bits by writing a test
 * pattern at aliasing addresses and checking whether it wraps onto
 * address 0. Results are recorded in params->chan.
 * Return: 0 on success, -EINVAL if no supported geometry was found.
 */
static int sdram_col_row_detect(struct udevice *dev)
{
	struct dram_info *priv = dev_get_priv(dev);
	struct rk3368_sdram_params *params = dev_get_platdata(dev);
	struct rk3368_ddr_pctl *pctl = priv->pctl;
	struct rk3368_msch *msch = priv->msch;
	const u32 test_pattern = 0x5aa5f00f;
	int row, col;
	uintptr_t addr;

	/* Use ddrconf 6 (see msch_niu_config table) while probing columns */
	move_to_config_state(pctl);
	writel(6, &msch->ddrconf);
	move_to_access_state(pctl);

	/* Detect col: try 11 down to 9 column bits */
	for (col = 11; col >= 9; col--) {
		writel(0, CONFIG_SYS_SDRAM_BASE);
		addr = CONFIG_SYS_SDRAM_BASE +
			(1 << (col + params->chan.bw - 1));
		writel(test_pattern, addr);
		if ((readl(addr) == test_pattern) &&
		    (readl(CONFIG_SYS_SDRAM_BASE) == 0))
			break;
	}

	/* col == 8 means the loop ran out without a match */
	if (col == 8) {
		pr_err("%s: col detect error\n", __func__);
		return -EINVAL;
	}

	/* Switch to ddrconf 15 for the row probe */
	move_to_config_state(pctl);
	writel(15, &msch->ddrconf);
	move_to_access_state(pctl);

	/* Detect row: try 16 down to 12 row bits */
	for (row = 16; row >= 12; row--) {
		writel(0, CONFIG_SYS_SDRAM_BASE);
		addr = CONFIG_SYS_SDRAM_BASE + (1 << (row + 15 - 1));
		writel(test_pattern, addr);
		if ((readl(addr) == test_pattern) &&
		    (readl(CONFIG_SYS_SDRAM_BASE) == 0))
			break;
	}

	/* row == 11 means the loop ran out without a match */
	if (row == 11) {
		pr_err("%s: row detect error\n", __func__);
		return -EINVAL;
	}

	/* Record results */
	debug("%s: col %d, row %d\n", __func__, col, row);
	params->chan.col = col;
	params->chan.cs0_row = row;
	params->chan.cs1_row = row;
	params->chan.row_3_4 = 0;

	return 0;
}
657
/*
 * Select the memory-scheduler (NIU) DDR address-mapping configuration
 * matching the detected geometry and the requested schedule type, and
 * program it into msch->ddrconf.
 * Return: 0 on success, -EINVAL if no table entry matches.
 */
static int msch_niu_config(struct rk3368_msch *msch,
			   struct rk3368_sdram_params *params)
{
	int i;
	const u8 cols = params->chan.col - ((params->chan.bw == 2) ? 0 : 1);
	const u8 rows = params->chan.cs0_row;

	/*
	 * The DDR address-translation table always assumes a 32bit
	 * bus and the comparison below takes care of adjusting for
	 * a 16bit bus (i.e. one column-address is consumed).
	 */
	const struct {
		u8 rows;
		u8 columns;
		u8 type;
	} ddrconf_table[] = {
		/*
		 * C-B-R-D patterns are first. For these we require an
		 * exact match for the columns and rows (as there's
		 * one entry per possible configuration).
		 */
		[0] = { .rows = 13, .columns = 10, .type = DMC_MSCH_CBRD },
		[1] = { .rows = 14, .columns = 10, .type = DMC_MSCH_CBRD },
		[2] = { .rows = 15, .columns = 10, .type = DMC_MSCH_CBRD },
		[3] = { .rows = 16, .columns = 10, .type = DMC_MSCH_CBRD },
		[4] = { .rows = 14, .columns = 11, .type = DMC_MSCH_CBRD },
		[5] = { .rows = 15, .columns = 11, .type = DMC_MSCH_CBRD },
		[6] = { .rows = 16, .columns = 11, .type = DMC_MSCH_CBRD },
		[7] = { .rows = 13, .columns = 9, .type = DMC_MSCH_CBRD },
		[8] = { .rows = 14, .columns = 9, .type = DMC_MSCH_CBRD },
		[9] = { .rows = 15, .columns = 9, .type = DMC_MSCH_CBRD },
		[10] = { .rows = 16, .columns = 9, .type = DMC_MSCH_CBRD },
		/*
		 * 11 through 13 are C-R-B-D patterns. These are
		 * matched for an exact number of columns and to
		 * ensure that the hardware uses at least as many rows
		 * as the pattern requires (i.e. we make sure that
		 * there's no gaps up until we hit the device/chip-select;
		 * however, these patterns can accept up to 16 rows,
		 * as the row-address continues right after the CS
		 * switching)
		 */
		[11] = { .rows = 15, .columns = 10, .type = DMC_MSCH_CRBD },
		[12] = { .rows = 14, .columns = 11, .type = DMC_MSCH_CRBD },
		[13] = { .rows = 13, .columns = 10, .type = DMC_MSCH_CRBD },
		/*
		 * 14 and 15 are catch-all variants using a C-B-D-R
		 * scheme (i.e. alternating the chip-select every time
		 * C-B overflows) and stuffing the remaining C-bits
		 * into the top. Matching needs to make sure that the
		 * number of columns is either an exact match (i.e. we
		 * can use fewer than the maximum number of rows) -or-
		 * that the columns exceed what is given in this table
		 * and the rows are an exact match (in which case the
		 * remaining C-bits will be stuffed onto the top after
		 * the device/chip-select switches).
		 */
		[14] = { .rows = 16, .columns = 10, .type = DMC_MSCH_CBDR },
		[15] = { .rows = 16, .columns = 9, .type = DMC_MSCH_CBDR },
	};

	/*
	 * For C-B-R-D, we need an exact match (i.e. both for the number of
	 * columns and rows), while for C-B-D-R, only the number of
	 * columns needs to match.
	 */
	for (i = 0; i < ARRAY_SIZE(ddrconf_table); i++) {
		bool match = false;

		/* If this entry is for a different matcher, then skip it */
		if (ddrconf_table[i].type != params->memory_schedule)
			continue;

		/*
		 * Match according to the rules (exact/inexact/at-least)
		 * documented in the ddrconf_table above.
		 */
		switch (params->memory_schedule) {
		case DMC_MSCH_CBRD:
			match = (ddrconf_table[i].columns == cols) &&
				(ddrconf_table[i].rows == rows);
			break;

		case DMC_MSCH_CRBD:
			match = (ddrconf_table[i].columns == cols) &&
				(ddrconf_table[i].rows <= rows);
			break;

		case DMC_MSCH_CBDR:
			match = (ddrconf_table[i].columns == cols) ||
				((ddrconf_table[i].columns <= cols) &&
				 (ddrconf_table[i].rows == rows));
			break;

		default:
			break;
		}

		if (match) {
			debug("%s: setting ddrconf 0x%x\n", __func__, i);
			writel(i, &msch->ddrconf);
			return 0;
		}
	}

	pr_err("%s: ddrconf (NIU config) not found\n", __func__);
	return -EINVAL;
}
767
/*
 * Encode the detected DRAM geometry into PMU GRF OS_REG[2], the register
 * that later boot stages and the OS read to learn the memory layout
 * (decoded by rockchip_sdram_size()).
 */
static void dram_all_config(struct udevice *dev)
{
	struct dram_info *priv = dev_get_priv(dev);
	struct rk3368_pmu_grf *pmugrf = priv->pmugrf;
	struct rk3368_sdram_params *params = dev_get_platdata(dev);
	const struct rk3288_sdram_channel *info = &params->chan;
	u32 sys_reg = 0;
	const int chan = 0;	/* RK3368 has a single channel */

	sys_reg |= DDR3 << SYS_REG_DDRTYPE_SHIFT;
	sys_reg |= 0 << SYS_REG_NUM_CH_SHIFT;

	sys_reg |= info->row_3_4 << SYS_REG_ROW_3_4_SHIFT(chan);
	sys_reg |= 1 << SYS_REG_CHINFO_SHIFT(chan);
	sys_reg |= (info->rank - 1) << SYS_REG_RANK_SHIFT(chan);
	sys_reg |= (info->col - 9) << SYS_REG_COL_SHIFT(chan);
	/* bk == 3 (8 banks) encodes as 0 in this field */
	sys_reg |= info->bk == 3 ? 0 : 1 << SYS_REG_BK_SHIFT(chan);
	sys_reg |= (info->cs0_row - 13) << SYS_REG_CS0_ROW_SHIFT(chan);
	sys_reg |= (info->cs1_row - 13) << SYS_REG_CS1_ROW_SHIFT(chan);
	sys_reg |= (2 >> info->bw) << SYS_REG_BW_SHIFT(chan);
	sys_reg |= (2 >> info->dbw) << SYS_REG_DBW_SHIFT(chan);

	writel(sys_reg, &pmugrf->os_reg[2]);
}
792
/*
 * Full SDRAM bring-up: clock setup, controller/PHY reset and
 * configuration, DRAM initialization, data training, geometry
 * detection and scheduler configuration. On any failure this
 * prints a message and hangs (boot cannot proceed without DRAM).
 */
static int setup_sdram(struct udevice *dev)
{
	struct dram_info *priv = dev_get_priv(dev);
	struct rk3368_sdram_params *params = dev_get_platdata(dev);

	struct rk3368_ddr_pctl *pctl = priv->pctl;
	struct rk3368_ddrphy *ddrphy = priv->phy;
	struct rk3368_cru *cru = priv->cru;
	struct rk3368_grf *grf = priv->grf;
	struct rk3368_msch *msch = priv->msch;

	int ret;

	/* The input clock (i.e. DPLL) needs to be 2x the DRAM frequency */
	ret = clk_set_rate(&priv->ddr_clk, 2 * params->ddr_freq);
	if (ret < 0) {
		debug("%s: could not set DDR clock: %d\n", __func__, ret);
		return ret;
	}

	/* Update the read-latency for the RK3368 */
	writel(0x32, &msch->readlatency);

	/* Initialise the DDR PCTL and DDR PHY */
	ddrctl_reset(cru);
	ddrphy_reset(ddrphy);
	ddrphy_config_delays(ddrphy, params->ddr_freq);
	/* NOTE(review): dfi_cfg() can return -ETIME, but the result is
	 * ignored here — confirm whether a failure should abort. */
	dfi_cfg(pctl);
	/* Configure relative system information of grf_ddrc0_con0 register */
	ddr_set_ddr3_mode(grf, true);
	ddr_set_noc_spr_err_stall(grf, true);
	/* Calculate timings */
	pctl_calc_timings(params, params->ddr_freq);
	/* Initialise the device timings in protocol controller */
	pctl_cfg(pctl, params, grf);
	/* Configure AL, CL ... information of PHY registers */
	ddrphy_config(ddrphy,
		      params->pctl_timing.tcl,
		      params->pctl_timing.tal,
		      params->pctl_timing.tcwl);

	/* Initialize DRAM and configure with mode-register values */
	ret = memory_init(pctl, params);
	if (ret)
		goto error;

	move_to_config_state(pctl);
	/* Perform data-training (return value ignored, as above) */
	ddrphy_data_training(pctl, ddrphy);
	move_to_access_state(pctl);

	/* TODO(prt): could detect rank in training... */
	params->chan.rank = 2;
	/* TODO(prt): bus width is not auto-detected (yet)... */
	params->chan.bw = 2; /* 32bit wide bus */
	/* NOTE(review): self-assignment — dbw keeps whatever value platdata
	 * had; presumably an explicit device bus-width was intended here.
	 * Confirm against the vendor reference code. */
	params->chan.dbw = params->chan.dbw; /* 32bit wide bus */

	/* DDR3 is always 8 bank */
	params->chan.bk = 3;
	/* Detect col and row number */
	ret = sdram_col_row_detect(dev);
	if (ret)
		goto error;

	/* Configure NIU DDR configuration */
	ret = msch_niu_config(msch, params);
	if (ret)
		goto error;

	/* set up OS_REG to communicate w/ next stage and OS */
	dram_all_config(dev);

	return 0;

error:
	printf("DRAM init failed!\n");
	hang();
}
871#endif
872
873static int rk3368_dmc_ofdata_to_platdata(struct udevice *dev)
874{
875 int ret = 0;
876
877#if !CONFIG_IS_ENABLED(OF_PLATDATA)
878 struct rk3368_sdram_params *plat = dev_get_platdata(dev);
879
Masahiro Yamadae4873e32018-04-19 12:14:03 +0900880 ret = regmap_init_mem(dev_ofnode(dev), &plat->map);
Philipp Tomsichd21a4d82017-06-23 00:12:05 +0200881 if (ret)
882 return ret;
883#endif
884
885 return ret;
886}
887
888#if CONFIG_IS_ENABLED(OF_PLATDATA)
889static int conv_of_platdata(struct udevice *dev)
890{
891 struct rk3368_sdram_params *plat = dev_get_platdata(dev);
892 struct dtd_rockchip_rk3368_dmc *of_plat = &plat->of_plat;
Philipp Tomsichd21a4d82017-06-23 00:12:05 +0200893
894 plat->ddr_freq = of_plat->rockchip_ddr_frequency;
895 plat->ddr_speed_bin = of_plat->rockchip_ddr_speed_bin;
896 plat->memory_schedule = of_plat->rockchip_memory_schedule;
897
Philipp Tomsichd21a4d82017-06-23 00:12:05 +0200898 return 0;
899}
900#endif
901
/*
 * Driver probe: resolve syscon/clock handles and, in the TPL build,
 * run the full SDRAM setup. In all builds, read back the DRAM size
 * from PMU GRF OS_REG[2] for the RAM uclass.
 *
 * NOTE(review): 'plat' and 'ret' are declared only under
 * CONFIG_TPL_BUILD but used in the OF_PLATDATA block below — this
 * assumes OF_PLATDATA is only enabled for TPL builds; confirm.
 */
static int rk3368_dmc_probe(struct udevice *dev)
{
#ifdef CONFIG_TPL_BUILD
	struct rk3368_sdram_params *plat = dev_get_platdata(dev);
	struct rk3368_ddr_pctl *pctl;
	struct rk3368_ddrphy *ddrphy;
	struct rk3368_cru *cru;
	struct rk3368_grf *grf;
	struct rk3368_msch *msch;
	int ret;
	struct udevice *dev_clk;
#endif
	struct dram_info *priv = dev_get_priv(dev);

#if CONFIG_IS_ENABLED(OF_PLATDATA)
	ret = conv_of_platdata(dev);
	if (ret)
		return ret;
#endif

	priv->pmugrf = syscon_get_first_range(ROCKCHIP_SYSCON_PMUGRF);
	debug("%s: pmugrf=%p\n", __func__, priv->pmugrf);

#ifdef CONFIG_TPL_BUILD
	/* PCTL and PHY base addresses come from the DT 'reg' property */
	pctl = (struct rk3368_ddr_pctl *)plat->of_plat.reg[0];
	ddrphy = (struct rk3368_ddrphy *)plat->of_plat.reg[2];
	msch = syscon_get_first_range(ROCKCHIP_SYSCON_MSCH);
	grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF);

	priv->pctl = pctl;
	priv->phy = ddrphy;
	priv->msch = msch;
	priv->grf = grf;

	ret = rockchip_get_clk(&dev_clk);
	if (ret)
		return ret;
	priv->ddr_clk.id = CLK_DDR;
	ret = clk_request(dev_clk, &priv->ddr_clk);
	if (ret)
		return ret;

	cru = rockchip_get_cru();
	priv->cru = cru;
	if (IS_ERR(priv->cru))
		return PTR_ERR(priv->cru);

	ret = setup_sdram(dev);
	if (ret)
		return ret;
#endif

	priv->info.base = 0;
	priv->info.size =
		rockchip_sdram_size((phys_addr_t)&priv->pmugrf->os_reg[2]);

	/*
	 * we use the 0x00000000~0xfdffffff space since 0xff000000~0xffffffff
	 * is SoC register space (i.e. reserved), and 0xfe000000~0xfeffffff is
	 * inaccessible for some IP controller.
	 */
	priv->info.size = min(priv->info.size, (size_t)0xfe000000);

	return 0;
}
967
968static int rk3368_dmc_get_info(struct udevice *dev, struct ram_info *info)
969{
970 struct dram_info *priv = dev_get_priv(dev);
971
972 *info = priv->info;
973 return 0;
974}
975
/* RAM uclass operations for the RK3368 DMC */
static struct ram_ops rk3368_dmc_ops = {
	.get_info = rk3368_dmc_get_info,
};
979
980
/* Device-tree compatible strings handled by this driver */
static const struct udevice_id rk3368_dmc_ids[] = {
	{ .compatible = "rockchip,rk3368-dmc" },
	{ }
};
985
986U_BOOT_DRIVER(dmc_rk3368) = {
987 .name = "rockchip_rk3368_dmc",
988 .id = UCLASS_RAM,
989 .of_match = rk3368_dmc_ids,
990 .ops = &rk3368_dmc_ops,
991 .probe = rk3368_dmc_probe,
992 .priv_auto_alloc_size = sizeof(struct dram_info),
993 .ofdata_to_platdata = rk3368_dmc_ofdata_to_platdata,
994 .probe = rk3368_dmc_probe,
995 .priv_auto_alloc_size = sizeof(struct dram_info),
996 .platdata_auto_alloc_size = sizeof(struct rk3368_sdram_params),
997};