blob: 6e5e40dd1ae8e2d64bc640a15df6edac5741ee57 [file] [log] [blame]
Tim Harvey8ab871b2014-06-02 16:13:23 -07001/*
2 * Copyright (C) 2014 Gateworks Corporation
3 * Author: Tim Harvey <tharvey@gateworks.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0+
6 */
7
8#include <common.h>
9#include <linux/types.h>
Peng Fanda7ada02015-08-17 16:11:04 +080010#include <asm/arch/clock.h>
Tim Harvey8ab871b2014-06-02 16:13:23 -070011#include <asm/arch/mx6-ddr.h>
12#include <asm/arch/sys_proto.h>
13#include <asm/io.h>
14#include <asm/types.h>
Marek Vasut23023572016-03-02 14:49:51 +010015#include <wait_bit.h>
Tim Harvey8ab871b2014-06-02 16:13:23 -070016
Eric Nelsonc448df72016-10-30 16:33:50 -070017#if defined(CONFIG_MX6_DDRCAL)
Marek Vasutab257ed2015-12-16 15:40:06 +010018static void reset_read_data_fifos(void)
19{
20 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
21
22 /* Reset data FIFOs twice. */
23 setbits_le32(&mmdc0->mpdgctrl0, 1 << 31);
Álvaro Fernández Rojas918de032018-01-23 17:14:55 +010024 wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 31, 0, 100, 0);
Marek Vasutab257ed2015-12-16 15:40:06 +010025
26 setbits_le32(&mmdc0->mpdgctrl0, 1 << 31);
Álvaro Fernández Rojas918de032018-01-23 17:14:55 +010027 wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 31, 0, 100, 0);
Marek Vasutab257ed2015-12-16 15:40:06 +010028}
29
30static void precharge_all(const bool cs0_enable, const bool cs1_enable)
31{
32 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
33
34 /*
35 * Issue the Precharge-All command to the DDR device for both
36 * chip selects. Note, CON_REQ bit should also remain set. If
37 * only using one chip select, then precharge only the desired
38 * chip select.
39 */
40 if (cs0_enable) { /* CS0 */
41 writel(0x04008050, &mmdc0->mdscr);
Álvaro Fernández Rojas918de032018-01-23 17:14:55 +010042 wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0);
Marek Vasutab257ed2015-12-16 15:40:06 +010043 }
44
45 if (cs1_enable) { /* CS1 */
46 writel(0x04008058, &mmdc0->mdscr);
Álvaro Fernández Rojas918de032018-01-23 17:14:55 +010047 wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0);
Marek Vasutab257ed2015-12-16 15:40:06 +010048 }
49}
50
51static void force_delay_measurement(int bus_size)
52{
53 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
54 struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
55
56 writel(0x800, &mmdc0->mpmur0);
57 if (bus_size == 0x2)
58 writel(0x800, &mmdc1->mpmur0);
59}
60
61static void modify_dg_result(u32 *reg_st0, u32 *reg_st1, u32 *reg_ctrl)
62{
63 u32 dg_tmp_val, dg_dl_abs_offset, dg_hc_del, val_ctrl;
64
65 /*
66 * DQS gating absolute offset should be modified from reflecting
67 * (HW_DG_LOWx + HW_DG_UPx)/2 to reflecting (HW_DG_UPx - 0x80)
68 */
69
70 val_ctrl = readl(reg_ctrl);
71 val_ctrl &= 0xf0000000;
72
73 dg_tmp_val = ((readl(reg_st0) & 0x07ff0000) >> 16) - 0xc0;
74 dg_dl_abs_offset = dg_tmp_val & 0x7f;
75 dg_hc_del = (dg_tmp_val & 0x780) << 1;
76
77 val_ctrl |= dg_dl_abs_offset + dg_hc_del;
78
79 dg_tmp_val = ((readl(reg_st1) & 0x07ff0000) >> 16) - 0xc0;
80 dg_dl_abs_offset = dg_tmp_val & 0x7f;
81 dg_hc_del = (dg_tmp_val & 0x780) << 1;
82
83 val_ctrl |= (dg_dl_abs_offset + dg_hc_del) << 16;
84
85 writel(val_ctrl, reg_ctrl);
86}
87
Marek Vasuta694dac2018-03-30 03:04:43 +020088static void correct_mpwldectr_result(void *reg)
89{
90 /* Limit is 200/256 of CK, which is WL_HC_DELx | 0x48. */
91 const unsigned int limit = 0x148;
92 u32 val = readl(reg);
93 u32 old = val;
94
95 if ((val & 0x17f) > limit)
96 val &= 0xffff << 16;
97
98 if (((val >> 16) & 0x17f) > limit)
99 val &= 0xffff;
100
101 if (old != val)
102 writel(val, reg);
103}
104
/*
 * Run the MMDC hardware write-leveling calibration sequence.
 *
 * Returns 0 on success, otherwise an error mask:
 *   bit 0 - PHY0 reported a write-leveling error (MPWLGCR status bits)
 *   bit 1 - PHY1 reported a write-leveling error (x64 configs only)
 *   bit 2 - soft failure: all delay results read back as 0x001F001F,
 *           so the pre-calibration MPWLDECTRLx values were restored
 *
 * The numbered step comments below follow the calibration procedure;
 * statement order is significant and must not be rearranged.
 */
int mmdc_do_write_level_calibration(struct mx6_ddr_sysinfo const *sysinfo)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
	u32 esdmisc_val, zq_val;
	u32 errors = 0;
	u32 ldectrl[4] = {0};
	u32 ddr_mr1 = 0x4;
	u32 rwalat_max;

	/*
	 * Stash old values in case calibration fails,
	 * we need to restore them
	 */
	ldectrl[0] = readl(&mmdc0->mpwldectrl0);
	ldectrl[1] = readl(&mmdc0->mpwldectrl1);
	if (sysinfo->dsize == 2) {
		ldectrl[2] = readl(&mmdc1->mpwldectrl0);
		ldectrl[3] = readl(&mmdc1->mpwldectrl1);
	}

	/* disable DDR logic power down timer */
	clrbits_le32(&mmdc0->mdpdc, 0xff00);

	/* disable Adopt power down timer */
	setbits_le32(&mmdc0->mapsr, 0x1);

	debug("Starting write leveling calibration.\n");

	/*
	 * 2. disable auto refresh and ZQ calibration
	 * before proceeding with Write Leveling calibration
	 */
	esdmisc_val = readl(&mmdc0->mdref);
	writel(0x0000C000, &mmdc0->mdref);
	zq_val = readl(&mmdc0->mpzqhwctrl);
	writel(zq_val & ~0x3, &mmdc0->mpzqhwctrl);

	/* 3. increase walat and ralat to maximum */
	rwalat_max = (1 << 6) | (1 << 7) | (1 << 8) | (1 << 16) | (1 << 17);
	setbits_le32(&mmdc0->mdmisc, rwalat_max);
	if (sysinfo->dsize == 2)
		setbits_le32(&mmdc1->mdmisc, rwalat_max);
	/*
	 * 4 & 5. Configure the external DDR device to enter write-leveling
	 * mode through Load Mode Register command.
	 * Register setting:
	 * Bits[31:16] MR1 value (0x0080 write leveling enable)
	 * Bit[9] set WL_EN to enable MMDC DQS output
	 * Bits[6:4] set CMD bits for Load Mode Register programming
	 * Bits[2:0] set CMD_BA to 0x1 for DDR MR1 programming
	 */
	writel(0x00808231, &mmdc0->mdscr);

	/* 6. Activate automatic calibration by setting MPWLGCR[HW_WL_EN] */
	writel(0x00000001, &mmdc0->mpwlgcr);

	/*
	 * 7. Upon completion of this process the MMDC de-asserts
	 * the MPWLGCR[HW_WL_EN]
	 */
	wait_for_bit_le32(&mmdc0->mpwlgcr, 1 << 0, 0, 100, 0);

	/*
	 * 8. check for any errors: check both PHYs for x64 configuration,
	 * if x32, check only PHY0
	 */
	if (readl(&mmdc0->mpwlgcr) & 0x00000F00)
		errors |= 1;
	if (sysinfo->dsize == 2)
		if (readl(&mmdc1->mpwlgcr) & 0x00000F00)
			errors |= 2;

	debug("Ending write leveling calibration. Error mask: 0x%x\n", errors);

	/* check to see if cal failed */
	if ((readl(&mmdc0->mpwldectrl0) == 0x001F001F) &&
	    (readl(&mmdc0->mpwldectrl1) == 0x001F001F) &&
	    ((sysinfo->dsize < 2) ||
	     ((readl(&mmdc1->mpwldectrl0) == 0x001F001F) &&
	      (readl(&mmdc1->mpwldectrl1) == 0x001F001F)))) {
		debug("Cal seems to have soft-failed due to memory not supporting write leveling on all channels. Restoring original write leveling values.\n");
		writel(ldectrl[0], &mmdc0->mpwldectrl0);
		writel(ldectrl[1], &mmdc0->mpwldectrl1);
		if (sysinfo->dsize == 2) {
			writel(ldectrl[2], &mmdc1->mpwldectrl0);
			writel(ldectrl[3], &mmdc1->mpwldectrl1);
		}
		errors |= 4;
	}

	/* Clamp out-of-range measured delays (see correct_mpwldectr_result) */
	correct_mpwldectr_result(&mmdc0->mpwldectrl0);
	correct_mpwldectr_result(&mmdc0->mpwldectrl1);
	if (sysinfo->dsize == 2) {
		correct_mpwldectr_result(&mmdc1->mpwldectrl0);
		correct_mpwldectr_result(&mmdc1->mpwldectrl1);
	}

	/*
	 * User should issue MRS command to exit write leveling mode
	 * through Load Mode Register command
	 * Register setting:
	 * Bits[31:16] MR1 value "ddr_mr1" value from initialization
	 * Bit[9] clear WL_EN to disable MMDC DQS output
	 * Bits[6:4] set CMD bits for Load Mode Register programming
	 * Bits[2:0] set CMD_BA to 0x1 for DDR MR1 programming
	 */
	writel((ddr_mr1 << 16) + 0x8031, &mmdc0->mdscr);

	/* re-enable auto refresh and zq cal */
	writel(esdmisc_val, &mmdc0->mdref);
	writel(zq_val, &mmdc0->mpzqhwctrl);

	debug("\tMMDC_MPWLDECTRL0 after write level cal: 0x%08X\n",
	      readl(&mmdc0->mpwldectrl0));
	debug("\tMMDC_MPWLDECTRL1 after write level cal: 0x%08X\n",
	      readl(&mmdc0->mpwldectrl1));
	if (sysinfo->dsize == 2) {
		debug("\tMMDC_MPWLDECTRL0 after write level cal: 0x%08X\n",
		      readl(&mmdc1->mpwldectrl0));
		debug("\tMMDC_MPWLDECTRL1 after write level cal: 0x%08X\n",
		      readl(&mmdc1->mpwldectrl1));
	}

	/* We must force a readback of these values, to get them to stick */
	readl(&mmdc0->mpwldectrl0);
	readl(&mmdc0->mpwldectrl1);
	if (sysinfo->dsize == 2) {
		readl(&mmdc1->mpwldectrl0);
		readl(&mmdc1->mpwldectrl1);
	}

	/* enable DDR logic power down timer: */
	setbits_le32(&mmdc0->mdpdc, 0x00005500);

	/* Enable Adopt power down timer: */
	clrbits_le32(&mmdc0->mapsr, 0x1);

	/* Clear CON_REQ */
	writel(0, &mmdc0->mdscr);

	return errors;
}
248
/*
 * Run the MMDC DQS calibration sequence: read DQS gating calibration,
 * then read delay-line calibration, then write delay-line calibration.
 *
 * Returns 0 on success, otherwise an error mask:
 *   bit 0/1  - read DQS gating error on PHY0/PHY1
 *   bit 2/3  - read delay calibration error on PHY0/PHY1
 *   bit 4/5  - write delay calibration error on PHY0/PHY1
 * (PHY1 bits are only set for x64 configurations, sysinfo->dsize == 2.)
 *
 * The numbered step comments follow the calibration procedure; the
 * register-access ordering is significant and must not be rearranged.
 * NOTE(review): this uses the MX6DQ IOMUX block for the DQS pull-ups
 * regardless of variant — presumably only built for parts where that
 * layout applies; confirm against the surrounding #ifdefs and callers.
 */
int mmdc_do_dqs_calibration(struct mx6_ddr_sysinfo const *sysinfo)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
	struct mx6dq_iomux_ddr_regs *mx6_ddr_iomux =
		(struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE;
	bool cs0_enable;
	bool cs1_enable;
	bool cs0_enable_initial;
	bool cs1_enable_initial;
	u32 esdmisc_val;
	u32 temp_ref;
	u32 pddword = 0x00ffff00; /* best so far, place into MPPDCMPR1 */
	u32 errors = 0;
	u32 initdelay = 0x40404040;

	/* check to see which chip selects are enabled */
	cs0_enable_initial = readl(&mmdc0->mdctl) & 0x80000000;
	cs1_enable_initial = readl(&mmdc0->mdctl) & 0x40000000;

	/* disable DDR logic power down timer: */
	clrbits_le32(&mmdc0->mdpdc, 0xff00);

	/* disable Adopt power down timer: */
	setbits_le32(&mmdc0->mapsr, 0x1);

	/* set DQS pull ups */
	setbits_le32(&mx6_ddr_iomux->dram_sdqs0, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs1, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs2, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs3, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs4, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs5, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs6, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs7, 0x7000);

	/* Save old RALAT and WALAT values */
	esdmisc_val = readl(&mmdc0->mdmisc);

	/* Raise RALAT/WALAT to maximum for the duration of calibration */
	setbits_le32(&mmdc0->mdmisc,
		     (1 << 6) | (1 << 7) | (1 << 8) | (1 << 16) | (1 << 17));

	/* Disable auto refresh before proceeding with calibration */
	temp_ref = readl(&mmdc0->mdref);
	writel(0x0000c000, &mmdc0->mdref);

	/*
	 * Per the ref manual, issue one refresh cycle MDSCR[CMD]= 0x2,
	 * this also sets the CON_REQ bit.
	 */
	if (cs0_enable_initial)
		writel(0x00008020, &mmdc0->mdscr);
	if (cs1_enable_initial)
		writel(0x00008028, &mmdc0->mdscr);

	/* poll to make sure the con_ack bit was asserted */
	wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0);

	/*
	 * Check MDMISC register CALIB_PER_CS to see which CS calibration
	 * is targeted to (under normal cases, it should be cleared
	 * as this is the default value, indicating calibration is directed
	 * to CS0).
	 * Disable the other chip select not being target for calibration
	 * to avoid any potential issues. This will get re-enabled at end
	 * of calibration.
	 */
	if ((readl(&mmdc0->mdmisc) & 0x00100000) == 0)
		clrbits_le32(&mmdc0->mdctl, 1 << 30);	/* clear SDE_1 */
	else
		clrbits_le32(&mmdc0->mdctl, 1 << 31);	/* clear SDE_0 */

	/*
	 * Check to see which chip selects are now enabled for
	 * the remainder of the calibration.
	 */
	cs0_enable = readl(&mmdc0->mdctl) & 0x80000000;
	cs1_enable = readl(&mmdc0->mdctl) & 0x40000000;

	precharge_all(cs0_enable, cs1_enable);

	/* Write the pre-defined value into MPPDCMPR1 */
	writel(pddword, &mmdc0->mppdcmpr1);

	/*
	 * Issue a write access to the external DDR device by setting
	 * the bit SW_DUMMY_WR (bit 0) in the MPSWDAR0 and then poll
	 * this bit until it clears to indicate completion of the write access.
	 */
	setbits_le32(&mmdc0->mpswdar0, 1);
	wait_for_bit_le32(&mmdc0->mpswdar0, 1 << 0, 0, 100, 0);

	/* Set the RD_DL_ABS# bits to their default values
	 * (will be calibrated later in the read delay-line calibration).
	 * Both PHYs for x64 configuration, if x32, do only PHY0.
	 */
	writel(initdelay, &mmdc0->mprddlctl);
	if (sysinfo->dsize == 0x2)
		writel(initdelay, &mmdc1->mprddlctl);

	/* Force a measurment, for previous delay setup to take effect. */
	force_delay_measurement(sysinfo->dsize);

	/*
	 * ***************************
	 * Read DQS Gating calibration
	 * ***************************
	 */
	debug("Starting Read DQS Gating calibration.\n");

	/*
	 * Reset the read data FIFOs (two resets); only need to issue reset
	 * to PHY0 since in x64 mode, the reset will also go to PHY1.
	 */
	reset_read_data_fifos();

	/*
	 * Start the automatic read DQS gating calibration process by
	 * asserting MPDGCTRL0[HW_DG_EN] and MPDGCTRL0[DG_CMP_CYC]
	 * and then poll MPDGCTRL0[HW_DG_EN]] until this bit clears
	 * to indicate completion.
	 * Also, ensure that MPDGCTRL0[HW_DG_ERR] is clear to indicate
	 * no errors were seen during calibration.
	 */

	/*
	 * Set bit 30: chooses option to wait 32 cycles instead of
	 * 16 before comparing read data.
	 */
	setbits_le32(&mmdc0->mpdgctrl0, 1 << 30);
	if (sysinfo->dsize == 2)
		setbits_le32(&mmdc1->mpdgctrl0, 1 << 30);

	/*
	 * Start automatic read DQS gating calibration: the 5 << 28 value
	 * sets bit 28 (HW_DG_EN) while keeping bit 30 asserted.
	 */
	setbits_le32(&mmdc0->mpdgctrl0, 5 << 28);

	/* Poll for completion. MPDGCTRL0[HW_DG_EN] should be 0 */
	wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 28, 0, 100, 0);

	/*
	 * Check to see if any errors were encountered during calibration
	 * (check MPDGCTRL0[HW_DG_ERR]).
	 * Check both PHYs for x64 configuration, if x32, check only PHY0.
	 */
	if (readl(&mmdc0->mpdgctrl0) & 0x00001000)
		errors |= 1;

	if ((sysinfo->dsize == 0x2) && (readl(&mmdc1->mpdgctrl0) & 0x00001000))
		errors |= 2;

	/* now disable mpdgctrl0[DG_CMP_CYC] */
	clrbits_le32(&mmdc0->mpdgctrl0, 1 << 30);
	if (sysinfo->dsize == 2)
		clrbits_le32(&mmdc1->mpdgctrl0, 1 << 30);

	/*
	 * DQS gating absolute offset should be modified from
	 * reflecting (HW_DG_LOWx + HW_DG_UPx)/2 to
	 * reflecting (HW_DG_UPx - 0x80)
	 */
	modify_dg_result(&mmdc0->mpdghwst0, &mmdc0->mpdghwst1,
			 &mmdc0->mpdgctrl0);
	modify_dg_result(&mmdc0->mpdghwst2, &mmdc0->mpdghwst3,
			 &mmdc0->mpdgctrl1);
	if (sysinfo->dsize == 0x2) {
		modify_dg_result(&mmdc1->mpdghwst0, &mmdc1->mpdghwst1,
				 &mmdc1->mpdgctrl0);
		modify_dg_result(&mmdc1->mpdghwst2, &mmdc1->mpdghwst3,
				 &mmdc1->mpdgctrl1);
	}
	debug("Ending Read DQS Gating calibration. Error mask: 0x%x\n", errors);

	/*
	 * **********************
	 * Read Delay calibration
	 * **********************
	 */
	debug("Starting Read Delay calibration.\n");

	reset_read_data_fifos();

	/*
	 * 4. Issue the Precharge-All command to the DDR device for both
	 * chip selects. If only using one chip select, then precharge
	 * only the desired chip select.
	 */
	precharge_all(cs0_enable, cs1_enable);

	/*
	 * 9. Read delay-line calibration
	 * Start the automatic read calibration process by asserting
	 * MPRDDLHWCTL[HW_RD_DL_EN].
	 */
	writel(0x00000030, &mmdc0->mprddlhwctl);

	/*
	 * 10. poll for completion
	 * MMDC indicates that the write data calibration had finished by
	 * setting MPRDDLHWCTL[HW_RD_DL_EN] = 0. Also, ensure that
	 * no error bits were set.
	 */
	wait_for_bit_le32(&mmdc0->mprddlhwctl, 1 << 4, 0, 100, 0);

	/* check both PHYs for x64 configuration, if x32, check only PHY0 */
	if (readl(&mmdc0->mprddlhwctl) & 0x0000000f)
		errors |= 4;

	if ((sysinfo->dsize == 0x2) &&
	    (readl(&mmdc1->mprddlhwctl) & 0x0000000f))
		errors |= 8;

	debug("Ending Read Delay calibration. Error mask: 0x%x\n", errors);

	/*
	 * ***********************
	 * Write Delay Calibration
	 * ***********************
	 */
	debug("Starting Write Delay calibration.\n");

	reset_read_data_fifos();

	/*
	 * 4. Issue the Precharge-All command to the DDR device for both
	 * chip selects. If only using one chip select, then precharge
	 * only the desired chip select.
	 */
	precharge_all(cs0_enable, cs1_enable);

	/*
	 * 8. Set the WR_DL_ABS# bits to their default values.
	 * Both PHYs for x64 configuration, if x32, do only PHY0.
	 */
	writel(initdelay, &mmdc0->mpwrdlctl);
	if (sysinfo->dsize == 0x2)
		writel(initdelay, &mmdc1->mpwrdlctl);

	/*
	 * XXX This isn't in the manual. Force a measurement,
	 * for previous delay setup to effect.
	 */
	force_delay_measurement(sysinfo->dsize);

	/*
	 * 9. 10. Start the automatic write calibration process
	 * by asserting MPWRDLHWCTL0[HW_WR_DL_EN].
	 */
	writel(0x00000030, &mmdc0->mpwrdlhwctl);

	/*
	 * Poll for completion.
	 * MMDC indicates that the write data calibration had finished
	 * by setting MPWRDLHWCTL[HW_WR_DL_EN] = 0.
	 * Also, ensure that no error bits were set.
	 */
	wait_for_bit_le32(&mmdc0->mpwrdlhwctl, 1 << 4, 0, 100, 0);

	/* Check both PHYs for x64 configuration, if x32, check only PHY0 */
	if (readl(&mmdc0->mpwrdlhwctl) & 0x0000000f)
		errors |= 16;

	if ((sysinfo->dsize == 0x2) &&
	    (readl(&mmdc1->mpwrdlhwctl) & 0x0000000f))
		errors |= 32;

	debug("Ending Write Delay calibration. Error mask: 0x%x\n", errors);

	reset_read_data_fifos();

	/* Enable DDR logic power down timer */
	setbits_le32(&mmdc0->mdpdc, 0x00005500);

	/* Enable Adopt power down timer */
	clrbits_le32(&mmdc0->mapsr, 0x1);

	/* Restore MDMISC value (RALAT, WALAT) to MMDCP1 */
	writel(esdmisc_val, &mmdc0->mdmisc);

	/* Clear DQS pull ups */
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs0, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs1, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs2, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs3, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs4, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs5, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs6, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs7, 0x7000);

	/* Re-enable SDE (chip selects) if they were set initially */
	if (cs1_enable_initial)
		/* Set SDE_1 */
		setbits_le32(&mmdc0->mdctl, 1 << 30);

	if (cs0_enable_initial)
		/* Set SDE_0 */
		setbits_le32(&mmdc0->mdctl, 1 << 31);

	/* Re-enable to auto refresh */
	writel(temp_ref, &mmdc0->mdref);

	/* Clear the MDSCR (including the con_req bit) */
	writel(0x0, &mmdc0->mdscr);	/* CS0 */

	/* Poll to make sure the con_ack bit is clear */
	wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 0, 100, 0);

	/*
	 * Print out the registers that were updated as a result
	 * of the calibration process.
	 */
	debug("MMDC registers updated from calibration\n");
	debug("Read DQS gating calibration:\n");
	debug("\tMPDGCTRL0 PHY0 = 0x%08X\n", readl(&mmdc0->mpdgctrl0));
	debug("\tMPDGCTRL1 PHY0 = 0x%08X\n", readl(&mmdc0->mpdgctrl1));
	if (sysinfo->dsize == 2) {
		debug("\tMPDGCTRL0 PHY1 = 0x%08X\n", readl(&mmdc1->mpdgctrl0));
		debug("\tMPDGCTRL1 PHY1 = 0x%08X\n", readl(&mmdc1->mpdgctrl1));
	}
	debug("Read calibration:\n");
	debug("\tMPRDDLCTL PHY0 = 0x%08X\n", readl(&mmdc0->mprddlctl));
	if (sysinfo->dsize == 2)
		debug("\tMPRDDLCTL PHY1 = 0x%08X\n", readl(&mmdc1->mprddlctl));
	debug("Write calibration:\n");
	debug("\tMPWRDLCTL PHY0 = 0x%08X\n", readl(&mmdc0->mpwrdlctl));
	if (sysinfo->dsize == 2)
		debug("\tMPWRDLCTL PHY1 = 0x%08X\n", readl(&mmdc1->mpwrdlctl));

	/*
	 * Registers below are for debugging purposes. These print out
	 * the upper and lower boundaries captured during
	 * read DQS gating calibration.
	 */
	debug("Status registers bounds for read DQS gating:\n");
	debug("\tMPDGHWST0 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst0));
	debug("\tMPDGHWST1 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst1));
	debug("\tMPDGHWST2 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst2));
	debug("\tMPDGHWST3 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst3));
	if (sysinfo->dsize == 2) {
		debug("\tMPDGHWST0 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst0));
		debug("\tMPDGHWST1 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst1));
		debug("\tMPDGHWST2 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst2));
		debug("\tMPDGHWST3 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst3));
	}

	debug("Final do_dqs_calibration error mask: 0x%x\n", errors);

	return errors;
}
597#endif
598
Peng Fan2ecdd022014-12-30 17:24:01 +0800599#if defined(CONFIG_MX6SX)
600/* Configure MX6SX mmdc iomux */
601void mx6sx_dram_iocfg(unsigned width,
602 const struct mx6sx_iomux_ddr_regs *ddr,
603 const struct mx6sx_iomux_grp_regs *grp)
604{
605 struct mx6sx_iomux_ddr_regs *mx6_ddr_iomux;
606 struct mx6sx_iomux_grp_regs *mx6_grp_iomux;
607
608 mx6_ddr_iomux = (struct mx6sx_iomux_ddr_regs *)MX6SX_IOM_DDR_BASE;
609 mx6_grp_iomux = (struct mx6sx_iomux_grp_regs *)MX6SX_IOM_GRP_BASE;
610
611 /* DDR IO TYPE */
612 writel(grp->grp_ddr_type, &mx6_grp_iomux->grp_ddr_type);
613 writel(grp->grp_ddrpke, &mx6_grp_iomux->grp_ddrpke);
614
615 /* CLOCK */
616 writel(ddr->dram_sdclk_0, &mx6_ddr_iomux->dram_sdclk_0);
617
618 /* ADDRESS */
619 writel(ddr->dram_cas, &mx6_ddr_iomux->dram_cas);
620 writel(ddr->dram_ras, &mx6_ddr_iomux->dram_ras);
621 writel(grp->grp_addds, &mx6_grp_iomux->grp_addds);
622
623 /* Control */
624 writel(ddr->dram_reset, &mx6_ddr_iomux->dram_reset);
625 writel(ddr->dram_sdba2, &mx6_ddr_iomux->dram_sdba2);
626 writel(ddr->dram_sdcke0, &mx6_ddr_iomux->dram_sdcke0);
627 writel(ddr->dram_sdcke1, &mx6_ddr_iomux->dram_sdcke1);
628 writel(ddr->dram_odt0, &mx6_ddr_iomux->dram_odt0);
629 writel(ddr->dram_odt1, &mx6_ddr_iomux->dram_odt1);
630 writel(grp->grp_ctlds, &mx6_grp_iomux->grp_ctlds);
631
632 /* Data Strobes */
633 writel(grp->grp_ddrmode_ctl, &mx6_grp_iomux->grp_ddrmode_ctl);
634 writel(ddr->dram_sdqs0, &mx6_ddr_iomux->dram_sdqs0);
635 writel(ddr->dram_sdqs1, &mx6_ddr_iomux->dram_sdqs1);
636 if (width >= 32) {
637 writel(ddr->dram_sdqs2, &mx6_ddr_iomux->dram_sdqs2);
638 writel(ddr->dram_sdqs3, &mx6_ddr_iomux->dram_sdqs3);
639 }
640
641 /* Data */
642 writel(grp->grp_ddrmode, &mx6_grp_iomux->grp_ddrmode);
643 writel(grp->grp_b0ds, &mx6_grp_iomux->grp_b0ds);
644 writel(grp->grp_b1ds, &mx6_grp_iomux->grp_b1ds);
645 if (width >= 32) {
646 writel(grp->grp_b2ds, &mx6_grp_iomux->grp_b2ds);
647 writel(grp->grp_b3ds, &mx6_grp_iomux->grp_b3ds);
648 }
649 writel(ddr->dram_dqm0, &mx6_ddr_iomux->dram_dqm0);
650 writel(ddr->dram_dqm1, &mx6_ddr_iomux->dram_dqm1);
651 if (width >= 32) {
652 writel(ddr->dram_dqm2, &mx6_ddr_iomux->dram_dqm2);
653 writel(ddr->dram_dqm3, &mx6_ddr_iomux->dram_dqm3);
654 }
655}
656#endif
657
Fabio Estevam1b691df2018-01-03 12:33:05 -0200658#if defined(CONFIG_MX6UL) || defined(CONFIG_MX6ULL)
Peng Fan98f11a12015-07-20 19:28:33 +0800659void mx6ul_dram_iocfg(unsigned width,
660 const struct mx6ul_iomux_ddr_regs *ddr,
661 const struct mx6ul_iomux_grp_regs *grp)
662{
663 struct mx6ul_iomux_ddr_regs *mx6_ddr_iomux;
664 struct mx6ul_iomux_grp_regs *mx6_grp_iomux;
665
666 mx6_ddr_iomux = (struct mx6ul_iomux_ddr_regs *)MX6UL_IOM_DDR_BASE;
667 mx6_grp_iomux = (struct mx6ul_iomux_grp_regs *)MX6UL_IOM_GRP_BASE;
668
669 /* DDR IO TYPE */
670 writel(grp->grp_ddr_type, &mx6_grp_iomux->grp_ddr_type);
671 writel(grp->grp_ddrpke, &mx6_grp_iomux->grp_ddrpke);
672
673 /* CLOCK */
674 writel(ddr->dram_sdclk_0, &mx6_ddr_iomux->dram_sdclk_0);
675
676 /* ADDRESS */
677 writel(ddr->dram_cas, &mx6_ddr_iomux->dram_cas);
678 writel(ddr->dram_ras, &mx6_ddr_iomux->dram_ras);
679 writel(grp->grp_addds, &mx6_grp_iomux->grp_addds);
680
681 /* Control */
682 writel(ddr->dram_reset, &mx6_ddr_iomux->dram_reset);
683 writel(ddr->dram_sdba2, &mx6_ddr_iomux->dram_sdba2);
684 writel(ddr->dram_odt0, &mx6_ddr_iomux->dram_odt0);
685 writel(ddr->dram_odt1, &mx6_ddr_iomux->dram_odt1);
686 writel(grp->grp_ctlds, &mx6_grp_iomux->grp_ctlds);
687
688 /* Data Strobes */
689 writel(grp->grp_ddrmode_ctl, &mx6_grp_iomux->grp_ddrmode_ctl);
690 writel(ddr->dram_sdqs0, &mx6_ddr_iomux->dram_sdqs0);
691 writel(ddr->dram_sdqs1, &mx6_ddr_iomux->dram_sdqs1);
692
693 /* Data */
694 writel(grp->grp_ddrmode, &mx6_grp_iomux->grp_ddrmode);
695 writel(grp->grp_b0ds, &mx6_grp_iomux->grp_b0ds);
696 writel(grp->grp_b1ds, &mx6_grp_iomux->grp_b1ds);
697 writel(ddr->dram_dqm0, &mx6_ddr_iomux->dram_dqm0);
698 writel(ddr->dram_dqm1, &mx6_ddr_iomux->dram_dqm1);
699}
700#endif
701
Peng Fand226fac2015-08-17 16:11:00 +0800702#if defined(CONFIG_MX6SL)
703void mx6sl_dram_iocfg(unsigned width,
704 const struct mx6sl_iomux_ddr_regs *ddr,
705 const struct mx6sl_iomux_grp_regs *grp)
706{
707 struct mx6sl_iomux_ddr_regs *mx6_ddr_iomux;
708 struct mx6sl_iomux_grp_regs *mx6_grp_iomux;
709
710 mx6_ddr_iomux = (struct mx6sl_iomux_ddr_regs *)MX6SL_IOM_DDR_BASE;
711 mx6_grp_iomux = (struct mx6sl_iomux_grp_regs *)MX6SL_IOM_GRP_BASE;
712
713 /* DDR IO TYPE */
714 mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
715 mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;
716
717 /* CLOCK */
718 mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;
719
720 /* ADDRESS */
721 mx6_ddr_iomux->dram_cas = ddr->dram_cas;
722 mx6_ddr_iomux->dram_ras = ddr->dram_ras;
723 mx6_grp_iomux->grp_addds = grp->grp_addds;
724
725 /* Control */
726 mx6_ddr_iomux->dram_reset = ddr->dram_reset;
727 mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
728 mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;
729
730 /* Data Strobes */
731 mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
732 mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
733 mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
734 if (width >= 32) {
735 mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
736 mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
737 }
738
739 /* Data */
740 mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
741 mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
742 mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
743 if (width >= 32) {
744 mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
745 mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
746 }
747
748 mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
749 mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
750 if (width >= 32) {
751 mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
752 mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
753 }
754}
755#endif
756
Tim Harvey8ab871b2014-06-02 16:13:23 -0700757#if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6Q) || defined(CONFIG_MX6D)
758/* Configure MX6DQ mmdc iomux */
759void mx6dq_dram_iocfg(unsigned width,
760 const struct mx6dq_iomux_ddr_regs *ddr,
761 const struct mx6dq_iomux_grp_regs *grp)
762{
763 volatile struct mx6dq_iomux_ddr_regs *mx6_ddr_iomux;
764 volatile struct mx6dq_iomux_grp_regs *mx6_grp_iomux;
765
766 mx6_ddr_iomux = (struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE;
767 mx6_grp_iomux = (struct mx6dq_iomux_grp_regs *)MX6DQ_IOM_GRP_BASE;
768
769 /* DDR IO Type */
770 mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
771 mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;
772
773 /* Clock */
774 mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;
775 mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1;
776
777 /* Address */
778 mx6_ddr_iomux->dram_cas = ddr->dram_cas;
779 mx6_ddr_iomux->dram_ras = ddr->dram_ras;
780 mx6_grp_iomux->grp_addds = grp->grp_addds;
781
782 /* Control */
783 mx6_ddr_iomux->dram_reset = ddr->dram_reset;
784 mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0;
785 mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1;
786 mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
787 mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0;
788 mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1;
789 mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;
790
791 /* Data Strobes */
792 mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
793 mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
794 mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
795 if (width >= 32) {
796 mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
797 mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
798 }
799 if (width >= 64) {
800 mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4;
801 mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5;
802 mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6;
803 mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7;
804 }
805
806 /* Data */
807 mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
808 mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
809 mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
810 if (width >= 32) {
811 mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
812 mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
813 }
814 if (width >= 64) {
815 mx6_grp_iomux->grp_b4ds = grp->grp_b4ds;
816 mx6_grp_iomux->grp_b5ds = grp->grp_b5ds;
817 mx6_grp_iomux->grp_b6ds = grp->grp_b6ds;
818 mx6_grp_iomux->grp_b7ds = grp->grp_b7ds;
819 }
820 mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
821 mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
822 if (width >= 32) {
823 mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
824 mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
825 }
826 if (width >= 64) {
827 mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4;
828 mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5;
829 mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6;
830 mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7;
831 }
832}
833#endif
834
835#if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6DL) || defined(CONFIG_MX6S)
/*
 * mx6sdl_dram_iocfg() - program the MX6SDL DDR I/O pad configuration
 * @width:	DDR data bus width in bits (16, 32 or 64); pads for byte
 *		lanes 2-3 are only written when width >= 32, lanes 4-7
 *		only when width >= 64
 * @ddr:	board-specific per-pad DRAM iomux settings
 * @grp:	board-specific pad-group settings (DDR type, pull/keeper,
 *		drive strength)
 *
 * Copies the board-supplied settings into the MX6SDL IOMUXC DDR and GRP
 * register blocks. Pointers are volatile because each assignment is a
 * hardware register write and must not be optimized away or merged.
 */
void mx6sdl_dram_iocfg(unsigned width,
		       const struct mx6sdl_iomux_ddr_regs *ddr,
		       const struct mx6sdl_iomux_grp_regs *grp)
{
	volatile struct mx6sdl_iomux_ddr_regs *mx6_ddr_iomux;
	volatile struct mx6sdl_iomux_grp_regs *mx6_grp_iomux;

	mx6_ddr_iomux = (struct mx6sdl_iomux_ddr_regs *)MX6SDL_IOM_DDR_BASE;
	mx6_grp_iomux = (struct mx6sdl_iomux_grp_regs *)MX6SDL_IOM_GRP_BASE;

	/* DDR IO Type */
	mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
	mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;

	/* Clock */
	mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;
	mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1;

	/* Address */
	mx6_ddr_iomux->dram_cas = ddr->dram_cas;
	mx6_ddr_iomux->dram_ras = ddr->dram_ras;
	mx6_grp_iomux->grp_addds = grp->grp_addds;

	/* Control */
	mx6_ddr_iomux->dram_reset = ddr->dram_reset;
	mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0;
	mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1;
	mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
	mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0;
	mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1;
	mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;

	/* Data Strobes */
	mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
	mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
	mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
		mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
	}
	if (width >= 64) {
		mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4;
		mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5;
		mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6;
		mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7;
	}

	/* Data */
	mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
	mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
	mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
	if (width >= 32) {
		mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
		mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
	}
	if (width >= 64) {
		mx6_grp_iomux->grp_b4ds = grp->grp_b4ds;
		mx6_grp_iomux->grp_b5ds = grp->grp_b5ds;
		mx6_grp_iomux->grp_b6ds = grp->grp_b6ds;
		mx6_grp_iomux->grp_b7ds = grp->grp_b7ds;
	}
	mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
	mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
		mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
	}
	if (width >= 64) {
		mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4;
		mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5;
		mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6;
		mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7;
	}
}
911#endif
912
/*
 * Configure the MX6 MMDC registers based on:
 *  - board-specific memory configuration
 *  - board-specific calibration data
 *  - DDR3/LPDDR2 chip details
 *
 * The various calculations here are derived from the following Freescale
 * documents:
 * 1. The i.MX6DQSDL DDR3 Script Aid spreadsheet (DOC-94917), designed to
 *    generate MMDC configuration registers based on memory system and
 *    memory chip parameters.
 *
 * 2. The i.MX6SL LPDDR2 Script Aid spreadsheet V0.04, designed to generate
 *    MMDC configuration registers based on memory system and memory chip
 *    parameters.
 *
 * The defaults here are those which were specified in the spreadsheets.
 * For details on each register, refer to the IMX6DQRM and/or IMX6SDLRM
 * and/or IMX6SLRM section titled "MMDC initialization".
 */
/*
 * Build an MMDC MDSCR command word for a Mode Register Set access:
 * @val: mode register value, @ba: bank address, @cmd: command encoding,
 * @cs1: non-zero to target chip select 1. Bit 15 keeps CON_REQ asserted
 * (configuration request) while the command is issued.
 */
#define MR(val, ba, cmd, cs1) \
	((val << 16) | (1 << 15) | (cmd << 4) | (cs1 << 3) | ba)
/*
 * Write an MMDC channel-1 register, but only on SoC variants that have a
 * second MMDC channel (i.e. not MX6SX/UL/ULL/SL). Expects a local 'mmdc1'
 * pointer to be in scope at the call site.
 */
#define MMDC1(entry, value) do {					  \
	if (!is_mx6sx() && !is_mx6ul() && !is_mx6ull() && !is_mx6sl()) \
		mmdc1->entry = value;					  \
	} while (0)
938
Peng Fanda7ada02015-08-17 16:11:04 +0800939/*
940 * According JESD209-2B-LPDDR2: Table 103
941 * WL: write latency
942 */
943static int lpddr2_wl(uint32_t mem_speed)
944{
945 switch (mem_speed) {
946 case 1066:
947 case 933:
948 return 4;
949 case 800:
950 return 3;
951 case 677:
952 case 533:
953 return 2;
954 case 400:
955 case 333:
956 return 1;
957 default:
958 puts("invalid memory speed\n");
959 hang();
960 }
961
962 return 0;
963}
964
965/*
966 * According JESD209-2B-LPDDR2: Table 103
967 * RL: read latency
968 */
969static int lpddr2_rl(uint32_t mem_speed)
970{
971 switch (mem_speed) {
972 case 1066:
973 return 8;
974 case 933:
975 return 7;
976 case 800:
977 return 6;
978 case 677:
979 return 5;
980 case 533:
981 return 4;
982 case 400:
983 case 333:
984 return 3;
985 default:
986 puts("invalid memory speed\n");
987 hang();
988 }
989
990 return 0;
991}
992
/*
 * mx6_lpddr2_cfg() - initialize the MMDC controller for LPDDR2 memory
 * @sysinfo:	board-level memory system description (bus size, number of
 *		chip selects, density per CS, latencies, refresh settings)
 * @calib:	board-specific delay-line calibration values (determined
 *		empirically per board layout)
 * @lpddr2_cfg:	LPDDR2 chip parameters (density, speed grade, geometry,
 *		datasheet timings in 1/10 ns units)
 *
 * Derives the MMDC timing fields from the chip parameters and the actual
 * DDR clock, then runs the MMDC initialization sequence (configuration
 * request, timing registers, MRS commands to the devices, ZQ calibration,
 * refresh enable). The register writes follow the reference-manual
 * initialization order and must not be reordered. Hangs on unsupported
 * configurations (bus wider than 32 bits, unsupported density).
 */
void mx6_lpddr2_cfg(const struct mx6_ddr_sysinfo *sysinfo,
		    const struct mx6_mmdc_calibration *calib,
		    const struct mx6_lpddr2_cfg *lpddr2_cfg)
{
	volatile struct mmdc_p_regs *mmdc0;
	u32 val;
	u8 tcke, tcksrx, tcksre, trrd;
	u8 twl, txp, tfaw, tcl;
	u16 tras, twr, tmrd, trtp, twtr, trfc, txsr;
	u16 trcd_lp, trppb_lp, trpab_lp, trc_lp;
	u16 cs0_end;
	u8 coladdr;
	int clkper;	/* clock period in picoseconds */
	int clock;	/* clock freq in MHz */
	int cs;

	/* only support 16/32 bits */
	if (sysinfo->dsize > 1)
		hang();

	mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;

	clock = mxc_get_clock(MXC_DDR_CLK) / 1000000U;
	clkper = (1000 * 1000) / clock;	/* pico seconds */

	twl = lpddr2_wl(lpddr2_cfg->mem_speed) - 1;

	/* LPDDR2-S2 and LPDDR2-S4 have the same tRFC value. */
	switch (lpddr2_cfg->density) {
	case 1:
	case 2:
	case 4:
		trfc = DIV_ROUND_UP(130000, clkper) - 1;
		txsr = DIV_ROUND_UP(140000, clkper) - 1;
		break;
	case 8:
		trfc = DIV_ROUND_UP(210000, clkper) - 1;
		txsr = DIV_ROUND_UP(220000, clkper) - 1;
		break;
	default:
		/*
		 * 64Mb, 128Mb, 256Mb, 512Mb are not supported currently.
		 */
		hang();
		break;
	}
	/*
	 * txpdll, txpr, taonpd and taofpd are not relevant in LPDDR2 mode,
	 * set them to 0. */
	txp = DIV_ROUND_UP(7500, clkper) - 1;
	tcke = 3;
	if (lpddr2_cfg->mem_speed == 333)
		tfaw = DIV_ROUND_UP(60000, clkper) - 1;
	else
		tfaw = DIV_ROUND_UP(50000, clkper) - 1;
	trrd = DIV_ROUND_UP(10000, clkper) - 1;

	/* tckesr for LPDDR2 */
	tcksre = DIV_ROUND_UP(15000, clkper);
	tcksrx = tcksre;
	twr = DIV_ROUND_UP(15000, clkper) - 1;
	/*
	 * tMRR: 2, tMRW: 5
	 * tMRD should be set to max(tMRR, tMRW)
	 */
	tmrd = 5;
	tras = DIV_ROUND_UP(lpddr2_cfg->trasmin, clkper / 10) - 1;
	/* LPDDR2 mode uses the tRCD_LP field in MDCFG3. */
	trcd_lp = DIV_ROUND_UP(lpddr2_cfg->trcd_lp, clkper / 10) - 1;
	trc_lp = DIV_ROUND_UP(lpddr2_cfg->trasmin + lpddr2_cfg->trppb_lp,
			      clkper / 10) - 1;
	trppb_lp = DIV_ROUND_UP(lpddr2_cfg->trppb_lp, clkper / 10) - 1;
	trpab_lp = DIV_ROUND_UP(lpddr2_cfg->trpab_lp, clkper / 10) - 1;
	/* For LPDDR2, CL in MDCFG0 refers to RL */
	tcl = lpddr2_rl(lpddr2_cfg->mem_speed) - 3;
	twtr = DIV_ROUND_UP(7500, clkper) - 1;
	trtp = DIV_ROUND_UP(7500, clkper) - 1;

	cs0_end = 4 * sysinfo->cs_density - 1;

	debug("density:%d Gb (%d Gb per chip)\n",
	      sysinfo->cs_density, lpddr2_cfg->density);
	debug("clock: %dMHz (%d ps)\n", clock, clkper);
	debug("memspd:%d\n", lpddr2_cfg->mem_speed);
	debug("trcd_lp=%d\n", trcd_lp);
	debug("trppb_lp=%d\n", trppb_lp);
	debug("trpab_lp=%d\n", trpab_lp);
	debug("trc_lp=%d\n", trc_lp);
	debug("tcke=%d\n", tcke);
	debug("tcksrx=%d\n", tcksrx);
	debug("tcksre=%d\n", tcksre);
	debug("trfc=%d\n", trfc);
	debug("txsr=%d\n", txsr);
	debug("txp=%d\n", txp);
	debug("tfaw=%d\n", tfaw);
	debug("tcl=%d\n", tcl);
	debug("tras=%d\n", tras);
	debug("twr=%d\n", twr);
	debug("tmrd=%d\n", tmrd);
	debug("twl=%d\n", twl);
	debug("trtp=%d\n", trtp);
	debug("twtr=%d\n", twtr);
	debug("trrd=%d\n", trrd);
	debug("cs0_end=%d\n", cs0_end);
	debug("ncs=%d\n", sysinfo->ncs);

	/*
	 * board-specific configuration:
	 *  These values are determined empirically and vary per board layout
	 */
	mmdc0->mpwldectrl0 = calib->p0_mpwldectrl0;
	mmdc0->mpwldectrl1 = calib->p0_mpwldectrl1;
	mmdc0->mpdgctrl0 = calib->p0_mpdgctrl0;
	mmdc0->mpdgctrl1 = calib->p0_mpdgctrl1;
	mmdc0->mprddlctl = calib->p0_mprddlctl;
	mmdc0->mpwrdlctl = calib->p0_mpwrdlctl;
	mmdc0->mpzqlp2ctl = calib->mpzqlp2ctl;

	/* Read data DQ Byte0-3 delay */
	mmdc0->mprddqby0dl = 0x33333333;
	mmdc0->mprddqby1dl = 0x33333333;
	if (sysinfo->dsize > 0) {
		mmdc0->mprddqby2dl = 0x33333333;
		mmdc0->mprddqby3dl = 0x33333333;
	}

	/* Write data DQ Byte0-3 delay */
	mmdc0->mpwrdqby0dl = 0xf3333333;
	mmdc0->mpwrdqby1dl = 0xf3333333;
	if (sysinfo->dsize > 0) {
		mmdc0->mpwrdqby2dl = 0xf3333333;
		mmdc0->mpwrdqby3dl = 0xf3333333;
	}

	/*
	 * In LPDDR2 mode this register should be cleared,
	 * so no termination will be activated.
	 */
	mmdc0->mpodtctrl = 0;

	/* complete calibration */
	val = (1 << 11); /* Force measurement on delay-lines */
	mmdc0->mpmur0 = val;

	/* Step 1: configuration request */
	mmdc0->mdscr = (u32)(1 << 15); /* config request */

	/* Step 2: Timing configuration */
	mmdc0->mdcfg0 = (trfc << 24) | (txsr << 16) | (txp << 13) |
			(tfaw << 4) | tcl;
	mmdc0->mdcfg1 = (tras << 16) | (twr << 9) | (tmrd << 5) | twl;
	mmdc0->mdcfg2 = (trtp << 6) | (twtr << 3) | trrd;
	mmdc0->mdcfg3lp = (trc_lp << 16) | (trcd_lp << 8) |
			  (trppb_lp << 4) | trpab_lp;
	mmdc0->mdotc = 0;

	mmdc0->mdasp = cs0_end; /* CS addressing */

	/* Step 3: Configure DDR type */
	mmdc0->mdmisc = (sysinfo->cs1_mirror << 19) | (sysinfo->walat << 16) |
			(sysinfo->bi_on << 12) | (sysinfo->mif3_mode << 9) |
			(sysinfo->ralat << 6) | (1 << 3);

	/* Step 4: Configure delay while leaving reset */
	mmdc0->mdor = (sysinfo->sde_to_rst << 8) |
		      (sysinfo->rst_to_cke << 0);

	/* Step 5: Configure DDR physical parameters (density and burst len) */
	coladdr = lpddr2_cfg->coladdr;
	if (lpddr2_cfg->coladdr == 8)		/* 8-bit COL is 0x3 */
		coladdr += 4;
	else if (lpddr2_cfg->coladdr == 12)	/* 12-bit COL is 0x4 */
		coladdr += 1;
	mmdc0->mdctl = (lpddr2_cfg->rowaddr - 11) << 24 |	/* ROW */
		       (coladdr - 9) << 20 |			/* COL */
		       (0 << 19) |	/* Burst Length = 4 for LPDDR2 */
		       (sysinfo->dsize << 16);	/* DDR data bus size */

	/* Step 6: Perform ZQ calibration */
	val = 0xa1390003; /* one-time HW ZQ calib */
	mmdc0->mpzqhwctrl = val;

	/* Step 7: Enable MMDC with desired chip select */
	mmdc0->mdctl |= (1 << 31) |			     /* SDE_0 for CS0 */
			((sysinfo->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */

	/* Step 8: Write Mode Registers to Init LPDDR2 devices */
	for (cs = 0; cs < sysinfo->ncs; cs++) {
		/* MR63: reset */
		mmdc0->mdscr = MR(63, 0, 3, cs);
		/* MR10: calibration,
		 * 0xff is the calibration command after initialization.
		 */
		val = 0xA | (0xff << 8);
		mmdc0->mdscr = MR(val, 0, 3, cs);
		/* MR1 */
		val = 0x1 | (0x82 << 8);
		mmdc0->mdscr = MR(val, 0, 3, cs);
		/* MR2 */
		val = 0x2 | (0x04 << 8);
		mmdc0->mdscr = MR(val, 0, 3, cs);
		/* MR3 */
		val = 0x3 | (0x02 << 8);
		mmdc0->mdscr = MR(val, 0, 3, cs);
	}

	/* Step 10: Power down control and self-refresh */
	mmdc0->mdpdc = (tcke & 0x7) << 16 |
			5            << 12 |  /* PWDT_1: 256 cycles */
			5            <<  8 |  /* PWDT_0: 256 cycles */
			1            <<  6 |  /* BOTH_CS_PD */
			(tcksrx & 0x7) << 3 |
			(tcksre & 0x7);
	mmdc0->mapsr = 0x00001006; /* ADOPT power down enabled */

	/* Step 11: Configure ZQ calibration: one-time and periodic 1ms */
	val = 0xa1310003;
	mmdc0->mpzqhwctrl = val;

	/* Step 12: Configure and activate periodic refresh */
	mmdc0->mdref = (sysinfo->refsel << 14) | (sysinfo->refr << 11);

	/* Step 13: Deassert config request - init complete */
	mmdc0->mdscr = 0x00000000;

	/* wait for auto-ZQ calibration to complete */
	mdelay(1);
}
1221
/*
 * mx6_ddr3_cfg() - initialize the MMDC controller for DDR3 memory
 * @sysinfo:	board-level memory system description (bus size, number of
 *		chip selects, density per CS, ODT/Rtt settings, latencies,
 *		refresh settings)
 * @calib:	board-specific delay-line calibration values (determined
 *		empirically per board layout)
 * @ddr3_cfg:	DDR3 chip parameters (density, speed grade, geometry,
 *		datasheet timings in 1/10 ns units)
 *
 * Clamps the data rate to what the SoC variant supports (1066 MT/s on
 * MX6D/Q/QP, 800 MT/s otherwise), derives the MMDC timing fields from
 * the chip parameters and resulting clock, then runs the MMDC
 * initialization sequence (configuration request, timing registers,
 * per-CS MRS commands, ZQ calibration, refresh enable). Channel-1
 * registers are written through the MMDC1() macro only on SoCs that
 * have a second channel and only for 64-bit buses (dsize > 1). The
 * register writes follow the reference-manual initialization order and
 * must not be reordered. Hangs on unsupported density or speed.
 */
void mx6_ddr3_cfg(const struct mx6_ddr_sysinfo *sysinfo,
		  const struct mx6_mmdc_calibration *calib,
		  const struct mx6_ddr3_cfg *ddr3_cfg)
{
	volatile struct mmdc_p_regs *mmdc0;
	volatile struct mmdc_p_regs *mmdc1;
	u32 val;
	u8 tcke, tcksrx, tcksre, txpdll, taofpd, taonpd, trrd;
	u8 todtlon, taxpd, tanpd, tcwl, txp, tfaw, tcl;
	u8 todt_idle_off = 0x4;	/* from DDR3 Script Aid spreadsheet */
	u16 trcd, trc, tras, twr, tmrd, trtp, trp, twtr, trfc, txs, txpr;
	u16 cs0_end;
	u16 tdllk = 0x1ff;	/* DLL locking time: 512 cycles (JEDEC DDR3) */
	u8 coladdr;
	int clkper;	/* clock period in picoseconds */
	int clock;	/* clock freq in MHz */
	int cs;
	u16 mem_speed = ddr3_cfg->mem_speed;

	mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	if (!is_mx6sx() && !is_mx6ul() && !is_mx6ull() && !is_mx6sl())
		mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;

	/* Limit mem_speed for MX6D/MX6Q */
	if (is_mx6dq() || is_mx6dqp()) {
		if (mem_speed > 1066)
			mem_speed = 1066; /* 1066 MT/s */

		tcwl = 4;
	}
	/* Limit mem_speed for MX6S/MX6DL */
	else {
		if (mem_speed > 800)
			mem_speed = 800;  /* 800 MT/s */

		tcwl = 3;
	}

	clock = mem_speed / 2;
	/*
	 * Data rate of 1066 MT/s requires 533 MHz DDR3 clock, but MX6D/Q
	 * supports up to 528 MHz, so reduce the clock to fit chip specs
	 */
	if (is_mx6dq() || is_mx6dqp()) {
		if (clock > 528)
			clock = 528; /* 528 MHz */
	}

	clkper = (1000 * 1000) / clock;	/* pico seconds */
	todtlon = tcwl;
	taxpd = tcwl;
	tanpd = tcwl;

	switch (ddr3_cfg->density) {
	case 1: /* 1Gb per chip */
		trfc = DIV_ROUND_UP(110000, clkper) - 1;
		txs = DIV_ROUND_UP(120000, clkper) - 1;
		break;
	case 2: /* 2Gb per chip */
		trfc = DIV_ROUND_UP(160000, clkper) - 1;
		txs = DIV_ROUND_UP(170000, clkper) - 1;
		break;
	case 4: /* 4Gb per chip */
		trfc = DIV_ROUND_UP(260000, clkper) - 1;
		txs = DIV_ROUND_UP(270000, clkper) - 1;
		break;
	case 8: /* 8Gb per chip */
		trfc = DIV_ROUND_UP(350000, clkper) - 1;
		txs = DIV_ROUND_UP(360000, clkper) - 1;
		break;
	default:
		/* invalid density */
		puts("invalid chip density\n");
		hang();
		break;
	}
	txpr = txs;

	switch (mem_speed) {
	case 800:
		txp = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1;
		tcke = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1;
		if (ddr3_cfg->pagesz == 1) {
			tfaw = DIV_ROUND_UP(40000, clkper) - 1;
			trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1;
		} else {
			tfaw = DIV_ROUND_UP(50000, clkper) - 1;
			trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1;
		}
		break;
	case 1066:
		txp = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1;
		tcke = DIV_ROUND_UP(max(3 * clkper, 5625), clkper) - 1;
		if (ddr3_cfg->pagesz == 1) {
			tfaw = DIV_ROUND_UP(37500, clkper) - 1;
			trrd = DIV_ROUND_UP(max(4 * clkper, 7500), clkper) - 1;
		} else {
			tfaw = DIV_ROUND_UP(50000, clkper) - 1;
			trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1;
		}
		break;
	default:
		puts("invalid memory speed\n");
		hang();
		break;
	}
	txpdll = DIV_ROUND_UP(max(10 * clkper, 24000), clkper) - 1;
	tcksre = DIV_ROUND_UP(max(5 * clkper, 10000), clkper);
	taonpd = DIV_ROUND_UP(2000, clkper) - 1;
	tcksrx = tcksre;
	taofpd = taonpd;
	twr = DIV_ROUND_UP(15000, clkper) - 1;
	tmrd = DIV_ROUND_UP(max(12 * clkper, 15000), clkper) - 1;
	trc = DIV_ROUND_UP(ddr3_cfg->trcmin, clkper / 10) - 1;
	tras = DIV_ROUND_UP(ddr3_cfg->trasmin, clkper / 10) - 1;
	tcl = DIV_ROUND_UP(ddr3_cfg->trcd, clkper / 10) - 3;
	trp = DIV_ROUND_UP(ddr3_cfg->trcd, clkper / 10) - 1;
	twtr = ROUND(max(4 * clkper, 7500) / clkper, 1) - 1;
	trcd = trp;
	trtp = twtr;
	cs0_end = 4 * sysinfo->cs_density - 1;

	debug("density:%d Gb (%d Gb per chip)\n",
	      sysinfo->cs_density, ddr3_cfg->density);
	debug("clock: %dMHz (%d ps)\n", clock, clkper);
	debug("memspd:%d\n", mem_speed);
	debug("tcke=%d\n", tcke);
	debug("tcksrx=%d\n", tcksrx);
	debug("tcksre=%d\n", tcksre);
	debug("taofpd=%d\n", taofpd);
	debug("taonpd=%d\n", taonpd);
	debug("todtlon=%d\n", todtlon);
	debug("tanpd=%d\n", tanpd);
	debug("taxpd=%d\n", taxpd);
	debug("trfc=%d\n", trfc);
	debug("txs=%d\n", txs);
	debug("txp=%d\n", txp);
	debug("txpdll=%d\n", txpdll);
	debug("tfaw=%d\n", tfaw);
	debug("tcl=%d\n", tcl);
	debug("trcd=%d\n", trcd);
	debug("trp=%d\n", trp);
	debug("trc=%d\n", trc);
	debug("tras=%d\n", tras);
	debug("twr=%d\n", twr);
	debug("tmrd=%d\n", tmrd);
	debug("tcwl=%d\n", tcwl);
	debug("tdllk=%d\n", tdllk);
	debug("trtp=%d\n", trtp);
	debug("twtr=%d\n", twtr);
	debug("trrd=%d\n", trrd);
	debug("txpr=%d\n", txpr);
	debug("cs0_end=%d\n", cs0_end);
	debug("ncs=%d\n", sysinfo->ncs);
	debug("Rtt_wr=%d\n", sysinfo->rtt_wr);
	debug("Rtt_nom=%d\n", sysinfo->rtt_nom);
	debug("SRT=%d\n", ddr3_cfg->SRT);
	debug("twr=%d\n", twr);

	/*
	 * board-specific configuration:
	 *  These values are determined empirically and vary per board layout
	 * see:
	 *   appnote, ddr3 spreadsheet
	 */
	mmdc0->mpwldectrl0 = calib->p0_mpwldectrl0;
	mmdc0->mpwldectrl1 = calib->p0_mpwldectrl1;
	mmdc0->mpdgctrl0 = calib->p0_mpdgctrl0;
	mmdc0->mpdgctrl1 = calib->p0_mpdgctrl1;
	mmdc0->mprddlctl = calib->p0_mprddlctl;
	mmdc0->mpwrdlctl = calib->p0_mpwrdlctl;
	if (sysinfo->dsize > 1) {
		MMDC1(mpwldectrl0, calib->p1_mpwldectrl0);
		MMDC1(mpwldectrl1, calib->p1_mpwldectrl1);
		MMDC1(mpdgctrl0, calib->p1_mpdgctrl0);
		MMDC1(mpdgctrl1, calib->p1_mpdgctrl1);
		MMDC1(mprddlctl, calib->p1_mprddlctl);
		MMDC1(mpwrdlctl, calib->p1_mpwrdlctl);
	}

	/* Read data DQ Byte0-3 delay */
	mmdc0->mprddqby0dl = 0x33333333;
	mmdc0->mprddqby1dl = 0x33333333;
	if (sysinfo->dsize > 0) {
		mmdc0->mprddqby2dl = 0x33333333;
		mmdc0->mprddqby3dl = 0x33333333;
	}

	if (sysinfo->dsize > 1) {
		MMDC1(mprddqby0dl, 0x33333333);
		MMDC1(mprddqby1dl, 0x33333333);
		MMDC1(mprddqby2dl, 0x33333333);
		MMDC1(mprddqby3dl, 0x33333333);
	}

	/* MMDC Termination: rtt_nom:2 RZQ/2(120ohm), rtt_nom:1 RZQ/4(60ohm) */
	val = (sysinfo->rtt_nom == 2) ? 0x00011117 : 0x00022227;
	mmdc0->mpodtctrl = val;
	if (sysinfo->dsize > 1)
		MMDC1(mpodtctrl, val);

	/* complete calibration */
	val = (1 << 11); /* Force measurement on delay-lines */
	mmdc0->mpmur0 = val;
	if (sysinfo->dsize > 1)
		MMDC1(mpmur0, val);

	/* Step 1: configuration request */
	mmdc0->mdscr = (u32)(1 << 15); /* config request */

	/* Step 2: Timing configuration */
	mmdc0->mdcfg0 = (trfc << 24) | (txs << 16) | (txp << 13) |
			(txpdll << 9) | (tfaw << 4) | tcl;
	mmdc0->mdcfg1 = (trcd << 29) | (trp << 26) | (trc << 21) |
			(tras << 16) | (1 << 15) /* trpa */ |
			(twr << 9) | (tmrd << 5) | tcwl;
	mmdc0->mdcfg2 = (tdllk << 16) | (trtp << 6) | (twtr << 3) | trrd;
	mmdc0->mdotc = (taofpd << 27) | (taonpd << 24) | (tanpd << 20) |
		       (taxpd << 16) | (todtlon << 12) | (todt_idle_off << 4);
	mmdc0->mdasp = cs0_end; /* CS addressing */

	/* Step 3: Configure DDR type */
	mmdc0->mdmisc = (sysinfo->cs1_mirror << 19) | (sysinfo->walat << 16) |
			(sysinfo->bi_on << 12) | (sysinfo->mif3_mode << 9) |
			(sysinfo->ralat << 6);

	/* Step 4: Configure delay while leaving reset */
	mmdc0->mdor = (txpr << 16) | (sysinfo->sde_to_rst << 8) |
		      (sysinfo->rst_to_cke << 0);

	/* Step 5: Configure DDR physical parameters (density and burst len) */
	coladdr = ddr3_cfg->coladdr;
	if (ddr3_cfg->coladdr == 8)		/* 8-bit COL is 0x3 */
		coladdr += 4;
	else if (ddr3_cfg->coladdr == 12)	/* 12-bit COL is 0x4 */
		coladdr += 1;
	mmdc0->mdctl = (ddr3_cfg->rowaddr - 11) << 24 |	/* ROW */
		       (coladdr - 9) << 20 |		/* COL */
		       (1 << 19) |	/* Burst Length = 8 for DDR3 */
		       (sysinfo->dsize << 16);	/* DDR data bus size */

	/* Step 6: Perform ZQ calibration */
	val = 0xa1390001; /* one-time HW ZQ calib */
	mmdc0->mpzqhwctrl = val;
	if (sysinfo->dsize > 1)
		MMDC1(mpzqhwctrl, val);

	/* Step 7: Enable MMDC with desired chip select */
	mmdc0->mdctl |= (1 << 31) |			     /* SDE_0 for CS0 */
			((sysinfo->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */

	/* Step 8: Write Mode Registers to Init DDR3 devices */
	for (cs = 0; cs < sysinfo->ncs; cs++) {
		/* MR2 */
		val = (sysinfo->rtt_wr & 3) << 9 | (ddr3_cfg->SRT & 1) << 7 |
		      ((tcwl - 3) & 3) << 3;
		debug("MR2 CS%d: 0x%08x\n", cs, (u32)MR(val, 2, 3, cs));
		mmdc0->mdscr = MR(val, 2, 3, cs);
		/* MR3 */
		debug("MR3 CS%d: 0x%08x\n", cs, (u32)MR(0, 3, 3, cs));
		mmdc0->mdscr = MR(0, 3, 3, cs);
		/* MR1 */
		val = ((sysinfo->rtt_nom & 1) ? 1 : 0) << 2 |
		      ((sysinfo->rtt_nom & 2) ? 1 : 0) << 6;
		debug("MR1 CS%d: 0x%08x\n", cs, (u32)MR(val, 1, 3, cs));
		mmdc0->mdscr = MR(val, 1, 3, cs);
		/* MR0 */
		val = ((tcl - 1) << 4) |	/* CAS */
		      (1 << 8) |		/* DLL Reset */
		      ((twr - 3) << 9) |	/* Write Recovery */
		      (sysinfo->pd_fast_exit << 12); /* Precharge PD PLL on */
		debug("MR0 CS%d: 0x%08x\n", cs, (u32)MR(val, 0, 3, cs));
		mmdc0->mdscr = MR(val, 0, 3, cs);
		/* ZQ calibration */
		val = (1 << 10);
		mmdc0->mdscr = MR(val, 0, 4, cs);
	}

	/* Step 10: Power down control and self-refresh */
	mmdc0->mdpdc = (tcke & 0x7) << 16 |
			5            << 12 |  /* PWDT_1: 256 cycles */
			5            <<  8 |  /* PWDT_0: 256 cycles */
			1            <<  6 |  /* BOTH_CS_PD */
			(tcksrx & 0x7) << 3 |
			(tcksre & 0x7);
	if (!sysinfo->pd_fast_exit)
		mmdc0->mdpdc |= (1 << 7); /* SLOW_PD */
	mmdc0->mapsr = 0x00001006; /* ADOPT power down enabled */

	/* Step 11: Configure ZQ calibration: one-time and periodic 1ms */
	val = 0xa1390003;
	mmdc0->mpzqhwctrl = val;
	if (sysinfo->dsize > 1)
		MMDC1(mpzqhwctrl, val);

	/* Step 12: Configure and activate periodic refresh */
	mmdc0->mdref = (sysinfo->refsel << 14) | (sysinfo->refr << 11);

	/* Step 13: Deassert config request - init complete */
	mmdc0->mdscr = 0x00000000;

	/* wait for auto-ZQ calibration to complete */
	mdelay(1);
}
Peng Fan77e86952015-08-17 16:11:03 +08001526
Eric Nelsonec4fe262016-10-30 16:33:49 -07001527void mmdc_read_calibration(struct mx6_ddr_sysinfo const *sysinfo,
1528 struct mx6_mmdc_calibration *calib)
1529{
1530 struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
1531 struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
1532
1533 calib->p0_mpwldectrl0 = readl(&mmdc0->mpwldectrl0);
1534 calib->p0_mpwldectrl1 = readl(&mmdc0->mpwldectrl1);
1535 calib->p0_mpdgctrl0 = readl(&mmdc0->mpdgctrl0);
1536 calib->p0_mpdgctrl1 = readl(&mmdc0->mpdgctrl1);
1537 calib->p0_mprddlctl = readl(&mmdc0->mprddlctl);
1538 calib->p0_mpwrdlctl = readl(&mmdc0->mpwrdlctl);
1539
1540 if (sysinfo->dsize == 2) {
1541 calib->p1_mpwldectrl0 = readl(&mmdc1->mpwldectrl0);
1542 calib->p1_mpwldectrl1 = readl(&mmdc1->mpwldectrl1);
1543 calib->p1_mpdgctrl0 = readl(&mmdc1->mpdgctrl0);
1544 calib->p1_mpdgctrl1 = readl(&mmdc1->mpdgctrl1);
1545 calib->p1_mprddlctl = readl(&mmdc1->mprddlctl);
1546 calib->p1_mpwrdlctl = readl(&mmdc1->mpwrdlctl);
1547 }
1548}
1549
Peng Fan77e86952015-08-17 16:11:03 +08001550void mx6_dram_cfg(const struct mx6_ddr_sysinfo *sysinfo,
1551 const struct mx6_mmdc_calibration *calib,
1552 const void *ddr_cfg)
1553{
1554 if (sysinfo->ddr_type == DDR_TYPE_DDR3) {
1555 mx6_ddr3_cfg(sysinfo, calib, ddr_cfg);
Peng Fanda7ada02015-08-17 16:11:04 +08001556 } else if (sysinfo->ddr_type == DDR_TYPE_LPDDR2) {
1557 mx6_lpddr2_cfg(sysinfo, calib, ddr_cfg);
Peng Fan77e86952015-08-17 16:11:03 +08001558 } else {
1559 puts("Unsupported ddr type\n");
1560 hang();
1561 }
1562}