/*
 * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * based on commit 21b6e480f92ccc38fe0502e3116411d6509d3bf2 of Diag by:
 * Copyright (C) 2015 Socionext Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <asm/processor.h>

#include "../init.h"
#include "../soc-info.h"
#include "ddrmphy-regs.h"
#include "umc-regs.h"

#define CH_NR	3

enum dram_freq {
	FREQ_1866M,
	FREQ_2133M,
	FREQ_NR,
};

enum dram_size {
	SIZE_0,
	SIZE_512M,
	SIZE_1G,
	SIZE_NR,
};

static u32 ddrphy_pgcr2[FREQ_NR] = {0x00FC7E5D, 0x00FC90AB};
static u32 ddrphy_ptr0[FREQ_NR] = {0x0EA09205, 0x10C0A6C6};
static u32 ddrphy_ptr1[FREQ_NR] = {0x0DAC041B, 0x0FA104B1};
static u32 ddrphy_ptr3[FREQ_NR] = {0x15171e45, 0x18182357};
static u32 ddrphy_ptr4[FREQ_NR] = {0x0e9ad8e9, 0x10b34157};
static u32 ddrphy_dtpr0[FREQ_NR] = {0x35a00d88, 0x39e40e88};
static u32 ddrphy_dtpr1[FREQ_NR] = {0x2288cc2c, 0x228a04d0};
static u32 ddrphy_dtpr2[FREQ_NR] = {0x50005e00, 0x50006a00};
static u32 ddrphy_dtpr3[FREQ_NR] = {0x0010cb49, 0x0010ec89};
static u32 ddrphy_mr0[FREQ_NR] = {0x00000115, 0x00000125};
static u32 ddrphy_mr2[FREQ_NR] = {0x000002a0, 0x000002a8};

/* dependent on package and board design */
static u32 ddrphy_acbdlr0[CH_NR] = {0x0000000c, 0x0000000c, 0x00000009};

static u32 umc_cmdctla[FREQ_NR] = {0x66DD131D, 0x77EE1722};
/*
 * ch2 uses a UMC core of a different generation.
 * Its register spec is different, unfortunately.
 */
static u32 umc_cmdctlb_ch01[FREQ_NR] = {0x13E87C44, 0x18F88C44};
static u32 umc_cmdctlb_ch2[FREQ_NR] = {0x19E8DC44, 0x1EF8EC44};
static u32 umc_spcctla[FREQ_NR][SIZE_NR] = {
	{0x00000000, 0x004A071D, 0x0078071D},
	{0x00000000, 0x0055081E, 0x0089081E},
};

static u32 umc_spcctlb[] = {0x00FF000A, 0x00FF000B};
/* The ch2 is different for some reason only the hardware guys know... */
static u32 umc_flowctla_ch01[] = {0x0800001E, 0x08000022};
static u32 umc_flowctla_ch2[] = {0x0800001E, 0x0800001E};

/* DDR multiPHY */
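/*
 * Map a DATX8 (byte lane) index to its rank.  Judging from the RANKEN
 * and WLRKEN handling below, each pair of byte lanes appears to share
 * one rank, hence the division by two.
 */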
static inline int ddrphy_get_rank(int dx)
{
	return dx / 2;
}

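/*
 * Reset the PHY FIFO pointers by pulsing the PHYFRST bit in PGCR0 low
 * for a moment and then setting it again.
 */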
static void ddrphy_fifo_reset(void __iomem *phy_base)
{
	u32 tmp;

	tmp = readl(phy_base + DMPHY_PGCR0);
	tmp &= ~DMPHY_PGCR0_PHYFRST;
	writel(tmp, phy_base + DMPHY_PGCR0);

	udelay(1);

	tmp |= DMPHY_PGCR0_PHYFRST;
	writel(tmp, phy_base + DMPHY_PGCR0);

	udelay(1);
}

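/*
 * Enable or disable VT (voltage/temperature) compensation via the
 * INHVT bit.  When disabling, wait until the PHY reports that VT
 * compensation has actually stopped (PGSR1 VTSTOP).
 */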
static void ddrphy_vt_ctrl(void __iomem *phy_base, int enable)
{
	u32 tmp;

	tmp = readl(phy_base + DMPHY_PGCR1);

	if (enable)
		tmp &= ~DMPHY_PGCR1_INHVT;
	else
		tmp |= DMPHY_PGCR1_INHVT;

	writel(tmp, phy_base + DMPHY_PGCR1);

	if (!enable) {
		while (!(readl(phy_base + DMPHY_PGSR1) & DMPHY_PGSR1_VTSTOP))
			cpu_relax();
	}
}

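/*
 * Adjust the read DQS delay (bits [15:8] of each DX LCDLR1 register) of
 * every byte lane by 'step', clamping the result to the 8-bit range.
 * VT compensation is paused while the delay lines are rewritten.
 */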
static void ddrphy_dqs_delay_fixup(void __iomem *phy_base, int nr_dx, int step)
{
	int dx;
	u32 lcdlr1, rdqsd;
	void __iomem *dx_base = phy_base + DMPHY_DX_BASE;

	ddrphy_vt_ctrl(phy_base, 0);

	for (dx = 0; dx < nr_dx; dx++) {
		lcdlr1 = readl(dx_base + DMPHY_DX_LCDLR1);
		rdqsd = (lcdlr1 >> 8) & 0xff;
		rdqsd = clamp(rdqsd + step, 0U, 0xffU);
		lcdlr1 = (lcdlr1 & ~(0xff << 8)) | (rdqsd << 8);
		writel(lcdlr1, dx_base + DMPHY_DX_LCDLR1);
		readl(dx_base + DMPHY_DX_LCDLR1); /* relax */
		dx_base += DMPHY_DX_STRIDE;
	}

	ddrphy_vt_ctrl(phy_base, 1);
}

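/*
 * Read the DQS gating system latency (DGSL) field for every byte lane
 * and rank from the DX GTR registers and return the largest value.
 * Warn if the trained latencies are not all equal.
 */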
static int ddrphy_get_system_latency(void __iomem *phy_base, int width)
{
	void __iomem *dx_base = phy_base + DMPHY_DX_BASE;
	const int nr_dx = width / 8;
	int dx, rank;
	u32 gtr;
	int dgsl, dgsl_min = INT_MAX, dgsl_max = 0;

	for (dx = 0; dx < nr_dx; dx++) {
		gtr = readl(dx_base + DMPHY_DX_GTR);
		for (rank = 0; rank < 4; rank++) {
			dgsl = gtr & 0x7;
			/* if dgsl is zero, this rank was not trained.  Skip it. */
			if (dgsl) {
				dgsl_min = min(dgsl_min, dgsl);
				dgsl_max = max(dgsl_max, dgsl);
			}
			gtr >>= 3;
		}
		dx_base += DMPHY_DX_STRIDE;
	}

	if (dgsl_min != dgsl_max)
		printf("DQS Gating System Latencies are not all leveled.\n");

	return dgsl_max;
}

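/*
 * Static setup of the DDR multiPHY: program the timing, I/O, and mode
 * register parameters for the selected frequency, bus width, and
 * channel, configure data training, and wait for the PHY init done
 * flag before trimming the read DQS delay.
 */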
static void ddrphy_init(void __iomem *phy_base, enum dram_freq freq, int width,
			int ch)
{
	u32 tmp;
	void __iomem *zq_base, *dx_base;
	int zq, dx;
	int nr_dx;

	nr_dx = width / 8;

	writel(DMPHY_PIR_ZCALBYP, phy_base + DMPHY_PIR);
	/*
	 * Disable the RGLVT bit (Read DQS Gating LCDL Delay VT Compensation)
	 * to avoid read errors.
	 */
	writel(0x07d81e37, phy_base + DMPHY_PGCR0);
	writel(0x0200c4e0, phy_base + DMPHY_PGCR1);

	tmp = ddrphy_pgcr2[freq];
	if (width >= 32)
		tmp |= DMPHY_PGCR2_DUALCHN | DMPHY_PGCR2_ACPDDC;
	writel(tmp, phy_base + DMPHY_PGCR2);

	writel(ddrphy_ptr0[freq], phy_base + DMPHY_PTR0);
	writel(ddrphy_ptr1[freq], phy_base + DMPHY_PTR1);
	writel(0x00083def, phy_base + DMPHY_PTR2);
	writel(ddrphy_ptr3[freq], phy_base + DMPHY_PTR3);
	writel(ddrphy_ptr4[freq], phy_base + DMPHY_PTR4);

	writel(ddrphy_acbdlr0[ch], phy_base + DMPHY_ACBDLR0);

	writel(0x55555555, phy_base + DMPHY_ACIOCR1);
	writel(0x00000000, phy_base + DMPHY_ACIOCR2);
	writel(0x55555555, phy_base + DMPHY_ACIOCR3);
	writel(0x00000000, phy_base + DMPHY_ACIOCR4);
	writel(0x00000055, phy_base + DMPHY_ACIOCR5);
	writel(0x00181aa4, phy_base + DMPHY_DXCCR);

	writel(0x0024641e, phy_base + DMPHY_DSGCR);
	writel(0x0000040b, phy_base + DMPHY_DCR);
	writel(ddrphy_dtpr0[freq], phy_base + DMPHY_DTPR0);
	writel(ddrphy_dtpr1[freq], phy_base + DMPHY_DTPR1);
	writel(ddrphy_dtpr2[freq], phy_base + DMPHY_DTPR2);
	writel(ddrphy_dtpr3[freq], phy_base + DMPHY_DTPR3);
	writel(ddrphy_mr0[freq], phy_base + DMPHY_MR0);
	writel(0x00000006, phy_base + DMPHY_MR1);
	writel(ddrphy_mr2[freq], phy_base + DMPHY_MR2);
	writel(0x00000000, phy_base + DMPHY_MR3);

	tmp = 0;
	for (dx = 0; dx < nr_dx; dx++)
		tmp |= BIT(DMPHY_DTCR_RANKEN_SHIFT + ddrphy_get_rank(dx));
	writel(0x90003087 | tmp, phy_base + DMPHY_DTCR);

	writel(0x00000000, phy_base + DMPHY_DTAR0);
	writel(0x00000008, phy_base + DMPHY_DTAR1);
	writel(0x00000010, phy_base + DMPHY_DTAR2);
	writel(0x00000018, phy_base + DMPHY_DTAR3);
	writel(0xdd22ee11, phy_base + DMPHY_DTDR0);
	writel(0x7788bb44, phy_base + DMPHY_DTDR1);

	/* impedance control settings */
	writel(0x04048900, phy_base + DMPHY_ZQCR);

	zq_base = phy_base + DMPHY_ZQ_BASE;
	for (zq = 0; zq < 4; zq++) {
		/*
		 * board-dependent
		 * PXS2: CH0ZQ0=0x5B, CH1ZQ0=0x5B, CH2ZQ0=0x59, others=0x5D
		 */
		writel(0x0007BB5D, zq_base + DMPHY_ZQ_PR);
		zq_base += DMPHY_ZQ_STRIDE;
	}

	/* DATX8 settings */
	dx_base = phy_base + DMPHY_DX_BASE;
	for (dx = 0; dx < 4; dx++) {
		tmp = readl(dx_base + DMPHY_DX_GCR0);
		tmp &= ~DMPHY_DX_GCR0_WLRKEN_MASK;
		tmp |= BIT(DMPHY_DX_GCR0_WLRKEN_SHIFT + ddrphy_get_rank(dx)) &
						DMPHY_DX_GCR0_WLRKEN_MASK;
		writel(tmp, dx_base + DMPHY_DX_GCR0);

		writel(0x00000000, dx_base + DMPHY_DX_GCR1);
		writel(0x00000000, dx_base + DMPHY_DX_GCR2);
		writel(0x00000000, dx_base + DMPHY_DX_GCR3);
		dx_base += DMPHY_DX_STRIDE;
	}

	while (!(readl(phy_base + DMPHY_PGSR0) & DMPHY_PGSR0_IDONE))
		cpu_relax();

	ddrphy_dqs_delay_fixup(phy_base, nr_dx, -4);
}

struct ddrphy_init_sequence {
	char *description;
	u32 init_flag;
	u32 done_flag;
	u32 err_flag;
};

static const struct ddrphy_init_sequence impedance_calibration_sequence[] = {
	{
		"Impedance Calibration",
		DMPHY_PIR_ZCAL,
		DMPHY_PGSR0_ZCDONE,
		DMPHY_PGSR0_ZCERR,
	},
	{ /* sentinel */ }
};

static const struct ddrphy_init_sequence dram_init_sequence[] = {
	{
		"DRAM Initialization",
		DMPHY_PIR_DRAMRST | DMPHY_PIR_DRAMINIT,
		DMPHY_PGSR0_DIDONE,
		0,
	},
	{ /* sentinel */ }
};

static const struct ddrphy_init_sequence training_sequence[] = {
	{
		"Write Leveling",
		DMPHY_PIR_WL,
		DMPHY_PGSR0_WLDONE,
		DMPHY_PGSR0_WLERR,
	},
	{
		"Read DQS Gate Training",
		DMPHY_PIR_QSGATE,
		DMPHY_PGSR0_QSGDONE,
		DMPHY_PGSR0_QSGERR,
	},
	{
		"Write Leveling Adjustment",
		DMPHY_PIR_WLADJ,
		DMPHY_PGSR0_WLADONE,
		DMPHY_PGSR0_WLAERR,
	},
	{
		"Read Bit Deskew",
		DMPHY_PIR_RDDSKW,
		DMPHY_PGSR0_RDDONE,
		DMPHY_PGSR0_RDERR,
	},
	{
		"Write Bit Deskew",
		DMPHY_PIR_WRDSKW,
		DMPHY_PGSR0_WDDONE,
		DMPHY_PGSR0_WDERR,
	},
	{
		"Read Eye Training",
		DMPHY_PIR_RDEYE,
		DMPHY_PGSR0_REDONE,
		DMPHY_PGSR0_REERR,
	},
	{
		"Write Eye Training",
		DMPHY_PIR_WREYE,
		DMPHY_PGSR0_WEDONE,
		DMPHY_PGSR0_WEERR,
	},
	{ /* sentinel */ }
};

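/*
 * Kick the requested init/training steps by writing the accumulated
 * flags to PIR, poll PGSR0 until every corresponding done flag is set
 * (or the 50 ms timeout expires), then check the per-step error flags.
 */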
static int __ddrphy_training(void __iomem *phy_base,
			     const struct ddrphy_init_sequence *seq)
{
	const struct ddrphy_init_sequence *s;
	u32 pgsr0;
	u32 init_flag = DMPHY_PIR_INIT;
	u32 done_flag = DMPHY_PGSR0_IDONE;
	int timeout = 50000;	/* 50 msec is long enough */
#ifdef DISPLAY_ELAPSED_TIME
	ulong start = get_timer(0);
#endif

	for (s = seq; s->description; s++) {
		init_flag |= s->init_flag;
		done_flag |= s->done_flag;
	}

	writel(init_flag, phy_base + DMPHY_PIR);

	do {
		if (--timeout < 0) {
			printf("%s: error: timeout during DDR training\n",
			       __func__);
			return -ETIMEDOUT;
		}
		udelay(1);
		pgsr0 = readl(phy_base + DMPHY_PGSR0);
	} while ((pgsr0 & done_flag) != done_flag);

	for (s = seq; s->description; s++) {
		if (pgsr0 & s->err_flag) {
			printf("%s: error: %s failed\n", __func__,
			       s->description);
			return -EIO;
		}
	}

#ifdef DISPLAY_ELAPSED_TIME
	printf("%s: info: elapsed time %ld msec\n", __func__, get_timer(start));
#endif

	return 0;
}

static int ddrphy_impedance_calibration(void __iomem *phy_base)
{
	int ret;
	u32 tmp;

	ret = __ddrphy_training(phy_base, impedance_calibration_sequence);
	if (ret)
		return ret;

	/*
	 * Because of a hardware bug, the IDONE flag is set as soon as the
	 * first ZQ block has been calibrated; it does not guarantee that
	 * all the ZQ blocks have completed.  Wait a little longer just in
	 * case.
	 */
	udelay(1);

	/* reflect ZQ settings and enable average algorithm */
	tmp = readl(phy_base + DMPHY_ZQCR);
	tmp |= DMPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
	writel(tmp, phy_base + DMPHY_ZQCR);
	tmp &= ~DMPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
	tmp |= DMPHY_ZQCR_AVGEN;
	writel(tmp, phy_base + DMPHY_ZQCR);

	return 0;
}

static int ddrphy_dram_init(void __iomem *phy_base)
{
	return __ddrphy_training(phy_base, dram_init_sequence);
}

static int ddrphy_training(void __iomem *phy_base)
{
	return __ddrphy_training(phy_base, training_sequence);
}

/* UMC */
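/*
 * Propagate the PHY read system latency into the UMC read-data control
 * registers.  The additional latency is split across the RADLTY field
 * (up to 0xf) and the RAD2LTY field for the remainder.
 */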
static void umc_set_system_latency(void __iomem *umc_dc_base, int phy_latency)
{
	u32 val;
	int latency;

	val = readl(umc_dc_base + UMC_RDATACTL_D0);
	latency = (val & UMC_RDATACTL_RADLTY_MASK) >> UMC_RDATACTL_RADLTY_SHIFT;
	latency += (val & UMC_RDATACTL_RAD2LTY_MASK) >>
						UMC_RDATACTL_RAD2LTY_SHIFT;
	/*
	 * The UMC runs at half the clock rate of the PHY, so the LSB of
	 * the PHY latency is ignored.
	 */
	latency += phy_latency & ~1;

	val &= ~(UMC_RDATACTL_RADLTY_MASK | UMC_RDATACTL_RAD2LTY_MASK);
	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	writel(val, umc_dc_base + UMC_RDATACTL_D0);
	writel(val, umc_dc_base + UMC_RDATACTL_D1);

	readl(umc_dc_base + UMC_RDATACTL_D1); /* relax */
}

/* enable/disable auto refresh */
void umc_refresh_ctrl(void __iomem *umc_dc_base, int enable)
{
	u32 tmp;

	tmp = readl(umc_dc_base + UMC_SPCSETB);
	tmp &= ~UMC_SPCSETB_AREFMD_MASK;

	if (enable)
		tmp |= UMC_SPCSETB_AREFMD_ARB;
	else
		tmp |= UMC_SPCSETB_AREFMD_REG;

	writel(tmp, umc_dc_base + UMC_SPCSETB);
	udelay(1);
}

static void umc_ud_init(void __iomem *umc_base, int ch)
{
	writel(0x00000003, umc_base + UMC_BITPERPIXELMODE_D0);

	if (ch == 2)
		writel(0x00000033, umc_base + UMC_PAIR1DOFF_D0);
}

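/*
 * Per-channel DRAM controller setup: program the command, space, and
 * flow control parameters for the given frequency, size, and bus width.
 * Channel 2 takes different values because of its different UMC core.
 */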
static void umc_dc_init(void __iomem *umc_dc_base, enum dram_freq freq,
			enum dram_size size, int ch, int width)
{
	int latency;
	u32 val;

	writel(umc_cmdctla[freq], umc_dc_base + UMC_CMDCTLA);

	writel(ch == 2 ? umc_cmdctlb_ch2[freq] : umc_cmdctlb_ch01[freq],
	       umc_dc_base + UMC_CMDCTLB);

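	/*
	 * Note: the SPCCTLA table appears to be indexed by the capacity per
	 * 16-bit sub-channel, i.e. the total channel size scaled down by
	 * the number of 16-bit units in the bus width.
	 */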
	writel(umc_spcctla[freq][size / (width / 16)],
	       umc_dc_base + UMC_SPCCTLA);
	writel(umc_spcctlb[freq], umc_dc_base + UMC_SPCCTLB);

	val = 0x000e000e;
	latency = 12;
	/* ES2 inserted one more FF into the logic. */
	if (uniphier_get_soc_model() >= 2)
		latency += 2;

	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	writel(val, umc_dc_base + UMC_RDATACTL_D0);
	if (width >= 32)
		writel(val, umc_dc_base + UMC_RDATACTL_D1);

	writel(0x04060A02, umc_dc_base + UMC_WDATACTL_D0);
	if (width >= 32)
		writel(0x04060A02, umc_dc_base + UMC_WDATACTL_D1);
	writel(0x04000000, umc_dc_base + UMC_DATASET);
	writel(0x00400020, umc_dc_base + UMC_DCCGCTL);
	writel(0x00000084, umc_dc_base + UMC_FLOWCTLG);
	writel(0x00000000, umc_dc_base + UMC_ACSSETA);

	writel(ch == 2 ? umc_flowctla_ch2[freq] : umc_flowctla_ch01[freq],
	       umc_dc_base + UMC_FLOWCTLA);

	writel(0x00004400, umc_dc_base + UMC_FLOWCTLC);
	writel(0x200A0A00, umc_dc_base + UMC_SPCSETB);
	writel(0x00000520, umc_dc_base + UMC_DFICUPDCTLA);
	writel(0x0000000D, umc_dc_base + UMC_RESPCTL);

	if (ch != 2) {
		writel(0x00202000, umc_dc_base + UMC_FLOWCTLB);
		writel(0xFDBFFFFF, umc_dc_base + UMC_FLOWCTLOB0);
		writel(0xFFFFFFFF, umc_dc_base + UMC_FLOWCTLOB1);
		writel(0x00080700, umc_dc_base + UMC_BSICMAPSET);
	} else {
		writel(0x00200000, umc_dc_base + UMC_FLOWCTLB);
		writel(0x00000000, umc_dc_base + UMC_BSICMAPSET);
	}

	writel(0x00000000, umc_dc_base + UMC_ERRMASKA);
	writel(0x00000000, umc_dc_base + UMC_ERRMASKB);
}

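/*
 * Bring up one channel: reset the controller, release the PHY resets,
 * initialize the PHY, run impedance calibration, DRAM init and, if the
 * channel has memory fitted (size != 0), training; then align the UMC
 * read latency with the PHY and clear the PHY FIFOs with auto refresh
 * paused.
 */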
static int umc_init(void __iomem *umc_base, enum dram_freq freq, int ch,
		    enum dram_size size, int width)
{
	void __iomem *umc_dc_base = umc_base + 0x00011000;
	void __iomem *phy_base = umc_base + 0x00030000;
	int ret;

	writel(0x00000002, umc_dc_base + UMC_INITSET);
	while (readl(umc_dc_base + UMC_INITSTAT) & BIT(2))
		cpu_relax();

	/* deassert PHY reset signals */
	writel(UMC_DIOCTLA_CTL_NRST | UMC_DIOCTLA_CFG_NRST,
	       umc_dc_base + UMC_DIOCTLA);

	ddrphy_init(phy_base, freq, width, ch);

	ret = ddrphy_impedance_calibration(phy_base);
	if (ret)
		return ret;

	ret = ddrphy_dram_init(phy_base);
	if (ret)
		return ret;

	umc_dc_init(umc_dc_base, freq, size, ch, width);

	umc_ud_init(umc_base, ch);

	if (size) {
		ret = ddrphy_training(phy_base);
		if (ret)
			return ret;
	}

	udelay(1);

	/* match the system latency between UMC and PHY */
	umc_set_system_latency(umc_dc_base,
			       ddrphy_get_system_latency(phy_base, width));

	udelay(1);

	/* stop auto refresh before clearing FIFO in PHY */
	umc_refresh_ctrl(umc_dc_base, 0);
	ddrphy_fifo_reset(phy_base);
	umc_refresh_ctrl(umc_dc_base, 1);

	udelay(10);

	return 0;
}

static void um_init(void __iomem *um_base)
{
	writel(0x000000ff, um_base + UMC_MBUS0);
	writel(0x000000ff, um_base + UMC_MBUS1);
	writel(0x000000ff, um_base + UMC_MBUS2);
	writel(0x000000ff, um_base + UMC_MBUS3);
}

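/*
 * Top-level entry: pick the parameter set for the board's DRAM
 * frequency, initialize each channel (the per-channel register blocks
 * are spaced 0x00200000 apart), then program the UMC_MBUS registers.
 */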
int proxstream2_umc_init(const struct uniphier_board_data *bd)
{
	void __iomem *um_base = (void __iomem *)0x5b600000;
	void __iomem *umc_ch_base = (void __iomem *)0x5b800000;
	enum dram_freq freq;
	int ch, ret;

	switch (bd->dram_freq) {
	case 1866:
		freq = FREQ_1866M;
		break;
	case 2133:
		freq = FREQ_2133M;
		break;
	default:
		printf("unsupported DRAM frequency %d MHz\n", bd->dram_freq);
		return -EINVAL;
	}

	for (ch = 0; ch < bd->dram_nr_ch; ch++) {
		ret = umc_init(umc_ch_base, freq, ch,
			       bd->dram_ch[ch].size / SZ_256M,
			       bd->dram_ch[ch].width);
		if (ret) {
			printf("failed to initialize UMC ch%d\n", ch);
			return ret;
		}

		umc_ch_base += 0x00200000;
	}

	um_init(um_base);

	return 0;
}