/*
 * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * based on commit 21b6e480f92ccc38fe0502e3116411d6509d3bf2 of Diag by:
 * Copyright (C) 2015 Socionext Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <asm/processor.h>

#include "../init.h"
#include "../soc-info.h"
#include "ddrmphy-regs.h"
#include "umc-regs.h"

enum dram_freq {
	FREQ_1866M,
	FREQ_2133M,
	FREQ_NR,
};

enum dram_size {
	SIZE_0,
	SIZE_512M,
	SIZE_1G,
	SIZE_NR,
};

static u32 ddrphy_pgcr2[FREQ_NR] = {0x00FC7E5D, 0x00FC90AB};
static u32 ddrphy_ptr0[FREQ_NR] = {0x0EA09205, 0x10C0A6C6};
static u32 ddrphy_ptr1[FREQ_NR] = {0x0DAC041B, 0x0FA104B1};
static u32 ddrphy_ptr3[FREQ_NR] = {0x15171e45, 0x18182357};
static u32 ddrphy_ptr4[FREQ_NR] = {0x0e9ad8e9, 0x10b34157};
static u32 ddrphy_dtpr0[FREQ_NR] = {0x35a00d88, 0x39e40e88};
static u32 ddrphy_dtpr1[FREQ_NR] = {0x2288cc2c, 0x228a04d0};
static u32 ddrphy_dtpr2[FREQ_NR] = {0x50005e00, 0x50006a00};
static u32 ddrphy_dtpr3[FREQ_NR] = {0x0010cb49, 0x0010ec89};
static u32 ddrphy_mr0[FREQ_NR] = {0x00000115, 0x00000125};
static u32 ddrphy_mr2[FREQ_NR] = {0x000002a0, 0x000002a8};

static u32 umc_cmdctla[FREQ_NR] = {0x66DD131D, 0x77EE1722};
/*
 * ch2 uses a different generation of the UMC core; unfortunately, its
 * register spec is different.
 */
static u32 umc_cmdctlb_ch01[FREQ_NR] = {0x13E87C44, 0x18F88C44};
static u32 umc_cmdctlb_ch2[FREQ_NR] = {0x19E8DC44, 0x1EF8EC44};
static u32 umc_spcctla[FREQ_NR][SIZE_NR] = {
	{0x00000000, 0x004A071D, 0x0078071D},
	{0x00000000, 0x0055081E, 0x0089081E},
};

static u32 umc_spcctlb[] = {0x00FF000A, 0x00FF000B};
/* ch2 is different for a reason that only the hardware designers know... */
static u32 umc_flowctla_ch01[] = {0x0800001E, 0x08000022};
static u32 umc_flowctla_ch2[] = {0x0800001E, 0x0800001E};

/* DDR multiPHY */
static inline int ddrphy_get_rank(int dx)
{
	return dx / 2;
}

static void ddrphy_fifo_reset(void __iomem *phy_base)
{
	u32 tmp;

	tmp = readl(phy_base + DMPHY_PGCR0);
	tmp &= ~DMPHY_PGCR0_PHYFRST;
	writel(tmp, phy_base + DMPHY_PGCR0);

	udelay(1);

	tmp |= DMPHY_PGCR0_PHYFRST;
	writel(tmp, phy_base + DMPHY_PGCR0);

	udelay(1);
}
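/*
 * Note: umc_init() below issues this FIFO reset only after stopping auto
 * refresh via umc_refresh_ctrl(), and re-enables refresh afterwards.
 */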

static void ddrphy_vt_ctrl(void __iomem *phy_base, int enable)
{
	u32 tmp;

	tmp = readl(phy_base + DMPHY_PGCR1);

	if (enable)
		tmp &= ~DMPHY_PGCR1_INHVT;
	else
		tmp |= DMPHY_PGCR1_INHVT;

	writel(tmp, phy_base + DMPHY_PGCR1);

	if (!enable) {
		while (!(readl(phy_base + DMPHY_PGSR1) & DMPHY_PGSR1_VTSTOP))
			cpu_relax();
	}
}
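/*
 * VT (voltage/temperature) compensation has to be inhibited, and VTSTOP
 * confirmed, before LCDL delay registers are rewritten; that is why
 * ddrphy_dqs_delay_fixup() below brackets its DXnLCDLR1 update with
 * ddrphy_vt_ctrl(phy_base, 0) / ddrphy_vt_ctrl(phy_base, 1).
 */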

static void ddrphy_dqs_delay_fixup(void __iomem *phy_base, int nr_dx, int step)
{
	int dx;
	u32 lcdlr1, rdqsd;
	void __iomem *dx_base = phy_base + DMPHY_DX_BASE;

	ddrphy_vt_ctrl(phy_base, 0);

	for (dx = 0; dx < nr_dx; dx++) {
		lcdlr1 = readl(dx_base + DMPHY_DX_LCDLR1);
		rdqsd = (lcdlr1 >> 8) & 0xff;
		rdqsd = clamp(rdqsd + step, 0U, 0xffU);
		lcdlr1 = (lcdlr1 & ~(0xff << 8)) | (rdqsd << 8);
		writel(lcdlr1, dx_base + DMPHY_DX_LCDLR1);
		readl(dx_base + DMPHY_DX_LCDLR1); /* relax */
		dx_base += DMPHY_DX_STRIDE;
	}

	ddrphy_vt_ctrl(phy_base, 1);
}
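/*
 * Illustrative example (made-up register value): if DXnLCDLR1 reads
 * 0x00002a00, the RDQSD field in bits [15:8] is 0x2a; with step = -4, as
 * passed from ddrphy_init(), the field becomes 0x26 and the register is
 * rewritten as 0x00002600.
 */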

static int ddrphy_get_system_latency(void __iomem *phy_base, int width)
{
	void __iomem *dx_base = phy_base + DMPHY_DX_BASE;
	const int nr_dx = width / 8;
	int dx, rank;
	u32 gtr;
	int dgsl, dgsl_min = INT_MAX, dgsl_max = 0;

	for (dx = 0; dx < nr_dx; dx++) {
		gtr = readl(dx_base + DMPHY_DX_GTR);
		for (rank = 0; rank < 4; rank++) {
			dgsl = gtr & 0x7;
			/* if dgsl is zero, this rank was not trained. skip. */
			if (dgsl) {
				dgsl_min = min(dgsl_min, dgsl);
				dgsl_max = max(dgsl_max, dgsl);
			}
			gtr >>= 3;
		}
		dx_base += DMPHY_DX_STRIDE;
	}

	if (dgsl_min != dgsl_max)
		printf("DQS Gating System Latencies are not all leveled.\n");

	return dgsl_max;
}
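/*
 * The loop above reads DXnGTR as packing the DQS gating system latency
 * (DGSL) in 3-bit fields, one per rank, hence the 0x7 mask and the shift
 * by 3.  Illustrative example: GTR = 0x00000012 yields DGSL = 2 for
 * ranks 0 and 1, ranks 2/3 read as 0 and are skipped as untrained, and
 * the function returns 2.
 */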

static void ddrphy_init(void __iomem *phy_base, enum dram_freq freq, int width)
{
	u32 tmp;
	void __iomem *zq_base, *dx_base;
	int zq, dx;
	int nr_dx;

	nr_dx = width / 8;

	writel(DMPHY_PIR_ZCALBYP, phy_base + DMPHY_PIR);
	/*
	 * Disable RGLVT bit (Read DQS Gating LCDL Delay VT Compensation)
	 * to avoid read error issue.
	 */
	writel(0x07d81e37, phy_base + DMPHY_PGCR0);
	writel(0x0200c4e0, phy_base + DMPHY_PGCR1);

	tmp = ddrphy_pgcr2[freq];
	if (width >= 32)
		tmp |= DMPHY_PGCR2_DUALCHN | DMPHY_PGCR2_ACPDDC;
	writel(tmp, phy_base + DMPHY_PGCR2);

	writel(ddrphy_ptr0[freq], phy_base + DMPHY_PTR0);
	writel(ddrphy_ptr1[freq], phy_base + DMPHY_PTR1);
	writel(0x00083def, phy_base + DMPHY_PTR2);
	writel(ddrphy_ptr3[freq], phy_base + DMPHY_PTR3);
	writel(ddrphy_ptr4[freq], phy_base + DMPHY_PTR4);

	writel(0x55555555, phy_base + DMPHY_ACIOCR1);
	writel(0x00000000, phy_base + DMPHY_ACIOCR2);
	writel(0x55555555, phy_base + DMPHY_ACIOCR3);
	writel(0x00000000, phy_base + DMPHY_ACIOCR4);
	writel(0x00000055, phy_base + DMPHY_ACIOCR5);
	writel(0x00181aa4, phy_base + DMPHY_DXCCR);

	writel(0x0024641e, phy_base + DMPHY_DSGCR);
	writel(0x0000040b, phy_base + DMPHY_DCR);
	writel(ddrphy_dtpr0[freq], phy_base + DMPHY_DTPR0);
	writel(ddrphy_dtpr1[freq], phy_base + DMPHY_DTPR1);
	writel(ddrphy_dtpr2[freq], phy_base + DMPHY_DTPR2);
	writel(ddrphy_dtpr3[freq], phy_base + DMPHY_DTPR3);
	writel(ddrphy_mr0[freq], phy_base + DMPHY_MR0);
	writel(0x00000006, phy_base + DMPHY_MR1);
	writel(ddrphy_mr2[freq], phy_base + DMPHY_MR2);
	writel(0x00000000, phy_base + DMPHY_MR3);

	tmp = 0;
	for (dx = 0; dx < nr_dx; dx++)
		tmp |= BIT(DMPHY_DTCR_RANKEN_SHIFT + ddrphy_get_rank(dx));
	writel(0x90003087 | tmp, phy_base + DMPHY_DTCR);
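	/*
	 * Illustrative example: with width = 32 (nr_dx = 4), byte lanes
	 * 0..3 map to ranks 0, 0, 1, 1 via ddrphy_get_rank(), so RANKEN
	 * bits 0 and 1 are set; with width = 16, only rank 0 is enabled.
	 */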

	writel(0x00000000, phy_base + DMPHY_DTAR0);
	writel(0x00000008, phy_base + DMPHY_DTAR1);
	writel(0x00000010, phy_base + DMPHY_DTAR2);
	writel(0x00000018, phy_base + DMPHY_DTAR3);
	writel(0xdd22ee11, phy_base + DMPHY_DTDR0);
	writel(0x7788bb44, phy_base + DMPHY_DTDR1);

	/* impedance control settings */
	writel(0x04048900, phy_base + DMPHY_ZQCR);

	zq_base = phy_base + DMPHY_ZQ_BASE;
	for (zq = 0; zq < 4; zq++) {
		/*
		 * board-dependent
		 * PXS2: CH0ZQ0=0x5B, CH1ZQ0=0x5B, CH2ZQ0=0x59, others=0x5D
		 */
		writel(0x0007BB5D, zq_base + DMPHY_ZQ_PR);
		zq_base += DMPHY_ZQ_STRIDE;
	}

	/* DATX8 settings */
	dx_base = phy_base + DMPHY_DX_BASE;
	for (dx = 0; dx < 4; dx++) {
		tmp = readl(dx_base + DMPHY_DX_GCR0);
		tmp &= ~DMPHY_DX_GCR0_WLRKEN_MASK;
		tmp |= BIT(DMPHY_DX_GCR0_WLRKEN_SHIFT + ddrphy_get_rank(dx)) &
			DMPHY_DX_GCR0_WLRKEN_MASK;
		writel(tmp, dx_base + DMPHY_DX_GCR0);

		writel(0x00000000, dx_base + DMPHY_DX_GCR1);
		writel(0x00000000, dx_base + DMPHY_DX_GCR2);
		writel(0x00000000, dx_base + DMPHY_DX_GCR3);
		dx_base += DMPHY_DX_STRIDE;
	}

	while (!(readl(phy_base + DMPHY_PGSR0) & DMPHY_PGSR0_IDONE))
		cpu_relax();

	ddrphy_dqs_delay_fixup(phy_base, nr_dx, -4);
}

struct ddrphy_init_sequence {
	char *description;
	u32 init_flag;
	u32 done_flag;
	u32 err_flag;
};

static const struct ddrphy_init_sequence impedance_calibration_sequence[] = {
	{
		"Impedance Calibration",
		DMPHY_PIR_ZCAL,
		DMPHY_PGSR0_ZCDONE,
		DMPHY_PGSR0_ZCERR,
	},
	{ /* sentinel */ }
};

static const struct ddrphy_init_sequence dram_init_sequence[] = {
	{
		"DRAM Initialization",
		DMPHY_PIR_DRAMRST | DMPHY_PIR_DRAMINIT,
		DMPHY_PGSR0_DIDONE,
		0,
	},
	{ /* sentinel */ }
};

static const struct ddrphy_init_sequence training_sequence[] = {
	{
		"Write Leveling",
		DMPHY_PIR_WL,
		DMPHY_PGSR0_WLDONE,
		DMPHY_PGSR0_WLERR,
	},
	{
		"Read DQS Gate Training",
		DMPHY_PIR_QSGATE,
		DMPHY_PGSR0_QSGDONE,
		DMPHY_PGSR0_QSGERR,
	},
	{
		"Write Leveling Adjustment",
		DMPHY_PIR_WLADJ,
		DMPHY_PGSR0_WLADONE,
		DMPHY_PGSR0_WLAERR,
	},
	{
		"Read Bit Deskew",
		DMPHY_PIR_RDDSKW,
		DMPHY_PGSR0_RDDONE,
		DMPHY_PGSR0_RDERR,
	},
	{
		"Write Bit Deskew",
		DMPHY_PIR_WRDSKW,
		DMPHY_PGSR0_WDDONE,
		DMPHY_PGSR0_WDERR,
	},
	{
		"Read Eye Training",
		DMPHY_PIR_RDEYE,
		DMPHY_PGSR0_REDONE,
		DMPHY_PGSR0_REERR,
	},
	{
		"Write Eye Training",
		DMPHY_PIR_WREYE,
		DMPHY_PGSR0_WEDONE,
		DMPHY_PGSR0_WEERR,
	},
	{ /* sentinel */ }
};

static int __ddrphy_training(void __iomem *phy_base,
			     const struct ddrphy_init_sequence *seq)
{
	const struct ddrphy_init_sequence *s;
	u32 pgsr0;
	u32 init_flag = DMPHY_PIR_INIT;
	u32 done_flag = DMPHY_PGSR0_IDONE;
	int timeout = 50000; /* 50 msec is long enough */
#ifdef DISPLAY_ELAPSED_TIME
	ulong start = get_timer(0);
#endif

	for (s = seq; s->description; s++) {
		init_flag |= s->init_flag;
		done_flag |= s->done_flag;
	}

	writel(init_flag, phy_base + DMPHY_PIR);

	do {
		if (--timeout < 0) {
			printf("%s: error: timeout during DDR training\n",
			       __func__);
			return -ETIMEDOUT;
		}
		udelay(1);
		pgsr0 = readl(phy_base + DMPHY_PGSR0);
	} while ((pgsr0 & done_flag) != done_flag);

	for (s = seq; s->description; s++) {
		if (pgsr0 & s->err_flag) {
			printf("%s: error: %s failed\n", __func__,
			       s->description);
			return -EIO;
		}
	}

#ifdef DISPLAY_ELAPSED_TIME
	printf("%s: info: elapsed time %ld msec\n", __func__,
	       get_timer(start));
#endif

	return 0;
}
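/*
 * All steps of a sequence are kicked off by the single PIR write above:
 * every init_flag is ORed into DMPHY_PIR_INIT, PGSR0 is then polled until
 * IDONE plus all accumulated done_flags are set, and only afterwards are
 * the per-step error flags checked.  For training_sequence this runs
 * write leveling through write eye training back to back from one trigger.
 */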

static int ddrphy_impedance_calibration(void __iomem *phy_base)
{
	int ret;
	u32 tmp;

	ret = __ddrphy_training(phy_base, impedance_calibration_sequence);
	if (ret)
		return ret;

	/*
	 * Because of a hardware bug, IDONE flag is set when the first ZQ block
	 * is calibrated. The flag does not guarantee the completion for all
	 * the ZQ blocks. Wait a little more just in case.
	 */
	udelay(1);

	/* reflect ZQ settings and enable average algorithm */
	tmp = readl(phy_base + DMPHY_ZQCR);
	tmp |= DMPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
	writel(tmp, phy_base + DMPHY_ZQCR);
	tmp &= ~DMPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
	tmp |= DMPHY_ZQCR_AVGEN;
	writel(tmp, phy_base + DMPHY_ZQCR);

	return 0;
}

static int ddrphy_dram_init(void __iomem *phy_base)
{
	return __ddrphy_training(phy_base, dram_init_sequence);
}

static int ddrphy_training(void __iomem *phy_base)
{
	return __ddrphy_training(phy_base, training_sequence);
}

/* UMC */
static void umc_set_system_latency(void __iomem *umc_dc_base, int phy_latency)
{
	u32 val;
	int latency;

	val = readl(umc_dc_base + UMC_RDATACTL_D0);
	latency = (val & UMC_RDATACTL_RADLTY_MASK) >> UMC_RDATACTL_RADLTY_SHIFT;
	latency += (val & UMC_RDATACTL_RAD2LTY_MASK) >>
						UMC_RDATACTL_RAD2LTY_SHIFT;
	/*
	 * The UMC runs at half the clock rate of the PHY.
	 * The LSB of the latency is ignored.
	 */
	latency += phy_latency & ~1;

	val &= ~(UMC_RDATACTL_RADLTY_MASK | UMC_RDATACTL_RAD2LTY_MASK);
	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	writel(val, umc_dc_base + UMC_RDATACTL_D0);
	writel(val, umc_dc_base + UMC_RDATACTL_D1);

	readl(umc_dc_base + UMC_RDATACTL_D1); /* relax */
}
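/*
 * Illustrative example (made-up numbers): a PHY latency of 5 contributes
 * 4 because the LSB is dropped; if the summed latency came to 0x12,
 * RADLTY would saturate at 0xf and RAD2LTY would carry the remaining 3.
 */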

/* enable/disable auto refresh */
void umc_refresh_ctrl(void __iomem *umc_dc_base, int enable)
{
	u32 tmp;

	tmp = readl(umc_dc_base + UMC_SPCSETB);
	tmp &= ~UMC_SPCSETB_AREFMD_MASK;

	if (enable)
		tmp |= UMC_SPCSETB_AREFMD_ARB;
	else
		tmp |= UMC_SPCSETB_AREFMD_REG;

	writel(tmp, umc_dc_base + UMC_SPCSETB);
	udelay(1);
}

static void umc_ud_init(void __iomem *umc_base, int ch)
{
	writel(0x00000003, umc_base + UMC_BITPERPIXELMODE_D0);

	if (ch == 2)
		writel(0x00000033, umc_base + UMC_PAIR1DOFF_D0);
}

static void umc_dc_init(void __iomem *umc_dc_base, enum dram_freq freq,
			enum dram_size size, int ch, int width)
{
	int latency;
	u32 val;

	writel(umc_cmdctla[freq], umc_dc_base + UMC_CMDCTLA);

	writel(ch == 2 ? umc_cmdctlb_ch2[freq] : umc_cmdctlb_ch01[freq],
	       umc_dc_base + UMC_CMDCTLB);

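	/*
	 * The size argument is the channel size in 256 MiB units (see the
	 * callers in proxstream2_umc_init()), so dividing by the number of
	 * 16-bit lanes (width / 16) selects the column of umc_spcctla[].
	 * Illustrative example: a 1 GiB, 32-bit channel indexes 4 / 2 = 2.
	 */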
	writel(umc_spcctla[freq][size / (width / 16)],
	       umc_dc_base + UMC_SPCCTLA);
	writel(umc_spcctlb[freq], umc_dc_base + UMC_SPCCTLB);

	val = 0x000e000e;
	latency = 12;
	/* ES2 inserted one more FF to the logic. */
	if (uniphier_get_soc_model() >= 2)
		latency += 2;

	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	writel(val, umc_dc_base + UMC_RDATACTL_D0);
	if (width >= 32)
		writel(val, umc_dc_base + UMC_RDATACTL_D1);

	writel(0x04060A02, umc_dc_base + UMC_WDATACTL_D0);
	if (width >= 32)
		writel(0x04060A02, umc_dc_base + UMC_WDATACTL_D1);
	writel(0x04000000, umc_dc_base + UMC_DATASET);
	writel(0x00400020, umc_dc_base + UMC_DCCGCTL);
	writel(0x00000084, umc_dc_base + UMC_FLOWCTLG);
	writel(0x00000000, umc_dc_base + UMC_ACSSETA);

	writel(ch == 2 ? umc_flowctla_ch2[freq] : umc_flowctla_ch01[freq],
	       umc_dc_base + UMC_FLOWCTLA);

	writel(0x00004400, umc_dc_base + UMC_FLOWCTLC);
	writel(0x200A0A00, umc_dc_base + UMC_SPCSETB);
	writel(0x00000520, umc_dc_base + UMC_DFICUPDCTLA);
	writel(0x0000000D, umc_dc_base + UMC_RESPCTL);

	if (ch != 2) {
		writel(0x00202000, umc_dc_base + UMC_FLOWCTLB);
		writel(0xFDBFFFFF, umc_dc_base + UMC_FLOWCTLOB0);
		writel(0xFFFFFFFF, umc_dc_base + UMC_FLOWCTLOB1);
		writel(0x00080700, umc_dc_base + UMC_BSICMAPSET);
	} else {
		writel(0x00200000, umc_dc_base + UMC_FLOWCTLB);
		writel(0x00000000, umc_dc_base + UMC_BSICMAPSET);
	}

	writel(0x00000000, umc_dc_base + UMC_ERRMASKA);
	writel(0x00000000, umc_dc_base + UMC_ERRMASKB);
}

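/*
 * Bring up one channel: reset the controller, release the PHY resets, run
 * impedance calibration and DRAM initialization, program the controller
 * timing registers, run the PHY training sequence (skipped when size is
 * zero), then align the read latency between UMC and PHY and clear the PHY
 * FIFOs with auto refresh stopped.
 */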
static int umc_init(void __iomem *umc_base, enum dram_freq freq, int ch,
		    enum dram_size size, int width)
{
	void __iomem *umc_dc_base = umc_base + 0x00011000;
	void __iomem *phy_base = umc_base + 0x00030000;
	int ret;

	writel(0x00000002, umc_dc_base + UMC_INITSET);
	while (readl(umc_dc_base + UMC_INITSTAT) & BIT(2))
		cpu_relax();

	/* deassert PHY reset signals */
	writel(UMC_DIOCTLA_CTL_NRST | UMC_DIOCTLA_CFG_NRST,
	       umc_dc_base + UMC_DIOCTLA);

	ddrphy_init(phy_base, freq, width);

	ret = ddrphy_impedance_calibration(phy_base);
	if (ret)
		return ret;

	ret = ddrphy_dram_init(phy_base);
	if (ret)
		return ret;

	umc_dc_init(umc_dc_base, freq, size, ch, width);

	umc_ud_init(umc_base, ch);

	if (size) {
		ret = ddrphy_training(phy_base);
		if (ret)
			return ret;
	}

	udelay(1);

	/* match the system latency between UMC and PHY */
	umc_set_system_latency(umc_dc_base,
			       ddrphy_get_system_latency(phy_base, width));

	udelay(1);

	/* stop auto refresh before clearing FIFO in PHY */
	umc_refresh_ctrl(umc_dc_base, 0);
	ddrphy_fifo_reset(phy_base);
	umc_refresh_ctrl(umc_dc_base, 1);

	udelay(10);

	return 0;
}

static void um_init(void __iomem *um_base)
{
	writel(0x000000ff, um_base + UMC_MBUS0);
	writel(0x000000ff, um_base + UMC_MBUS1);
	writel(0x000000ff, um_base + UMC_MBUS2);
	writel(0x000000ff, um_base + UMC_MBUS3);
}

int proxstream2_umc_init(const struct uniphier_board_data *bd)
{
	void __iomem *um_base = (void __iomem *)0x5b600000;
	void __iomem *umc_ch0_base = (void __iomem *)0x5b800000;
	void __iomem *umc_ch1_base = (void __iomem *)0x5ba00000;
	void __iomem *umc_ch2_base = (void __iomem *)0x5bc00000;
	enum dram_freq freq;
	int ret;

	switch (bd->dram_freq) {
	case 1866:
		freq = FREQ_1866M;
		break;
	case 2133:
		freq = FREQ_2133M;
		break;
	default:
		printf("unsupported DRAM frequency %d MHz\n", bd->dram_freq);
		return -EINVAL;
	}

	ret = umc_init(umc_ch0_base, freq, 0, bd->dram_ch0_size / SZ_256M,
		       bd->dram_ch0_width);
	if (ret) {
		printf("failed to initialize UMC ch0\n");
		return ret;
	}

	ret = umc_init(umc_ch1_base, freq, 1, bd->dram_ch1_size / SZ_256M,
		       bd->dram_ch1_width);
	if (ret) {
		printf("failed to initialize UMC ch1\n");
		return ret;
	}

	ret = umc_init(umc_ch2_base, freq, 2, bd->dram_ch2_size / SZ_256M,
		       bd->dram_ch2_width);
	if (ret) {
		printf("failed to initialize UMC ch2\n");
		return ret;
	}

	um_init(um_base);

	return 0;
}