/*
 * Keystone2: DDR3 initialization
 *
 * (C) Copyright 2012-2014
 *     Texas Instruments Incorporated, <www.ti.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <asm/io.h>
#include <common.h>
#include <asm/arch/msmc.h>
#include <asm/arch/ddr3.h>
#include <asm/arch/psc_defs.h>

#include <asm/ti-common/ti-edma3.h>

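/*
 * Scrub geometry: the source is a single zeroed 1 KB block (acnt); each
 * AB-synchronized EDMA trigger copies it bcnt = 0x8000 times, so one
 * transfer clears DDR3_EDMA_XF_SIZE = 32 MB of DDR.
 */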
#define DDR3_EDMA_BLK_SIZE_SHIFT	10
#define DDR3_EDMA_BLK_SIZE		(1 << DDR3_EDMA_BLK_SIZE_SHIFT)
#define DDR3_EDMA_BCNT			0x8000
#define DDR3_EDMA_CCNT			1
#define DDR3_EDMA_XF_SIZE		(DDR3_EDMA_BLK_SIZE * DDR3_EDMA_BCNT)
#define DDR3_EDMA_SLOT_NUM		1

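/*
 * ddr3_init_ddrphy() - program the DDR3 PHY from the board-specific
 * config: wait for PGSR0 bit 0 (init done), set up the PLL, timing and
 * mode registers, then issue the two PIR trigger values, polling PGSR0
 * after each.
 */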
void ddr3_init_ddrphy(u32 base, struct ddr3_phy_config *phy_cfg)
{
	unsigned int tmp;

	while ((__raw_readl(base + KS2_DDRPHY_PGSR0_OFFSET)
		& 0x00000001) != 0x00000001)
		;

	__raw_writel(phy_cfg->pllcr, base + KS2_DDRPHY_PLLCR_OFFSET);

	tmp = __raw_readl(base + KS2_DDRPHY_PGCR1_OFFSET);
	tmp &= ~(phy_cfg->pgcr1_mask);
	tmp |= phy_cfg->pgcr1_val;
	__raw_writel(tmp, base + KS2_DDRPHY_PGCR1_OFFSET);

	__raw_writel(phy_cfg->ptr0, base + KS2_DDRPHY_PTR0_OFFSET);
	__raw_writel(phy_cfg->ptr1, base + KS2_DDRPHY_PTR1_OFFSET);
	__raw_writel(phy_cfg->ptr3, base + KS2_DDRPHY_PTR3_OFFSET);
	__raw_writel(phy_cfg->ptr4, base + KS2_DDRPHY_PTR4_OFFSET);

	tmp = __raw_readl(base + KS2_DDRPHY_DCR_OFFSET);
	tmp &= ~(phy_cfg->dcr_mask);
	tmp |= phy_cfg->dcr_val;
	__raw_writel(tmp, base + KS2_DDRPHY_DCR_OFFSET);

	__raw_writel(phy_cfg->dtpr0, base + KS2_DDRPHY_DTPR0_OFFSET);
	__raw_writel(phy_cfg->dtpr1, base + KS2_DDRPHY_DTPR1_OFFSET);
	__raw_writel(phy_cfg->dtpr2, base + KS2_DDRPHY_DTPR2_OFFSET);
	__raw_writel(phy_cfg->mr0, base + KS2_DDRPHY_MR0_OFFSET);
	__raw_writel(phy_cfg->mr1, base + KS2_DDRPHY_MR1_OFFSET);
	__raw_writel(phy_cfg->mr2, base + KS2_DDRPHY_MR2_OFFSET);
	__raw_writel(phy_cfg->dtcr, base + KS2_DDRPHY_DTCR_OFFSET);
	__raw_writel(phy_cfg->pgcr2, base + KS2_DDRPHY_PGCR2_OFFSET);

	__raw_writel(phy_cfg->zq0cr1, base + KS2_DDRPHY_ZQ0CR1_OFFSET);
	__raw_writel(phy_cfg->zq1cr1, base + KS2_DDRPHY_ZQ1CR1_OFFSET);
	__raw_writel(phy_cfg->zq2cr1, base + KS2_DDRPHY_ZQ2CR1_OFFSET);

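	/* First PIR trigger: run the selected PHY init steps, wait for done */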
	__raw_writel(phy_cfg->pir_v1, base + KS2_DDRPHY_PIR_OFFSET);
	while ((__raw_readl(base + KS2_DDRPHY_PGSR0_OFFSET) & 0x1) != 0x1)
		;

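	/*
	 * K2G only: apply the board-specific DATX8 byte-lane overrides
	 * (e.g. to disable lanes that a narrower bus leaves unused).
	 */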
	if (cpu_is_k2g()) {
		clrsetbits_le32(base + KS2_DDRPHY_DATX8_2_OFFSET,
				phy_cfg->datx8_2_mask,
				phy_cfg->datx8_2_val);

		clrsetbits_le32(base + KS2_DDRPHY_DATX8_3_OFFSET,
				phy_cfg->datx8_3_mask,
				phy_cfg->datx8_3_val);

		clrsetbits_le32(base + KS2_DDRPHY_DATX8_4_OFFSET,
				phy_cfg->datx8_4_mask,
				phy_cfg->datx8_4_val);

		clrsetbits_le32(base + KS2_DDRPHY_DATX8_5_OFFSET,
				phy_cfg->datx8_5_mask,
				phy_cfg->datx8_5_val);

		clrsetbits_le32(base + KS2_DDRPHY_DATX8_6_OFFSET,
				phy_cfg->datx8_6_mask,
				phy_cfg->datx8_6_val);

		clrsetbits_le32(base + KS2_DDRPHY_DATX8_7_OFFSET,
				phy_cfg->datx8_7_mask,
				phy_cfg->datx8_7_val);

		clrsetbits_le32(base + KS2_DDRPHY_DATX8_8_OFFSET,
				phy_cfg->datx8_8_mask,
				phy_cfg->datx8_8_val);
	}

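	/* Second PIR trigger: run the remaining init steps, wait for done */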
	__raw_writel(phy_cfg->pir_v2, base + KS2_DDRPHY_PIR_OFFSET);
	while ((__raw_readl(base + KS2_DDRPHY_PGSR0_OFFSET) & 0x1) != 0x1)
		;
}

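/*
 * ddr3_init_ddremif() - program the EMIF4F DDR3 controller configuration
 * and timing registers; normally called after ddr3_init_ddrphy() has
 * brought up the PHY.
 */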
void ddr3_init_ddremif(u32 base, struct ddr3_emif_config *emif_cfg)
{
	__raw_writel(emif_cfg->sdcfg, base + KS2_DDR3_SDCFG_OFFSET);
	__raw_writel(emif_cfg->sdtim1, base + KS2_DDR3_SDTIM1_OFFSET);
	__raw_writel(emif_cfg->sdtim2, base + KS2_DDR3_SDTIM2_OFFSET);
	__raw_writel(emif_cfg->sdtim3, base + KS2_DDR3_SDTIM3_OFFSET);
	__raw_writel(emif_cfg->sdtim4, base + KS2_DDR3_SDTIM4_OFFSET);
	__raw_writel(emif_cfg->zqcfg, base + KS2_DDR3_ZQCFG_OFFSET);
	__raw_writel(emif_cfg->sdrfc, base + KS2_DDR3_SDRFC_OFFSET);
}

int ddr3_ecc_support_rmw(u32 base)
{
	u32 value = __raw_readl(base + KS2_DDR3_MIDR_OFFSET);

	/*
	 * Check the DDR3 controller ID register to see whether this
	 * controller revision supports ECC read-modify-write
	 */
	if (value == 0x40461C02)
		return 1;

	return 0;
}

static void ddr3_ecc_config(u32 base, u32 value)
{
	u32 data;

	__raw_writel(value, base + KS2_DDR3_ECC_CTRL_OFFSET);
	udelay(100000); /* delay required to synchronize across clock domains */

	if (value & KS2_DDR3_ECC_EN) {
		/* Clear the 1-bit error count */
		data = __raw_readl(base + KS2_DDR3_ONE_BIT_ECC_ERR_CNT_OFFSET);
		__raw_writel(data, base + KS2_DDR3_ONE_BIT_ECC_ERR_CNT_OFFSET);

		/* Enable the ECC interrupts */
		__raw_writel(KS2_DDR3_1B_ECC_ERR_SYS | KS2_DDR3_2B_ECC_ERR_SYS |
			     KS2_DDR3_WR_ECC_ERR_SYS,
			     base + KS2_DDR3_ECC_INT_ENABLE_SET_SYS_OFFSET);

		/* Clear the ECC error interrupt status */
		__raw_writel(KS2_DDR3_1B_ECC_ERR_SYS | KS2_DDR3_2B_ECC_ERR_SYS |
			     KS2_DDR3_WR_ECC_ERR_SYS,
			     base + KS2_DDR3_ECC_INT_STATUS_OFFSET);
	}
}

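/*
 * ddr3_reset_data() - zero all of DDR3 so every location carries valid
 * ECC check bits before ECC is enabled. A 1 KB zeroed source block is
 * copied over the whole memory by EDMA, one 2 GB MPAX-mapped window at
 * a time, since the EDMA controller only sees 32-bit addresses.
 */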
static void ddr3_reset_data(u32 base, u32 ddr3_size)
{
	u32 mpax[2];
	u32 seg_num;
	u32 seg, blks, dst, edma_blks;
	struct edma3_slot_config slot;
	struct edma3_channel_config edma_channel;
	u32 edma_src[DDR3_EDMA_BLK_SIZE/4] __aligned(16) = {0, };

	/* Set up an EDMA transfer to copy the 1 KB zero block across DDR */
	puts("\nClear entire DDR3 memory to enable ECC\n");

	/* Save the SES MPAX regs */
	if (cpu_is_k2g())
		msmc_get_ses_mpax(K2G_MSMC_SEGMENT_ARM, 0, mpax);
	else
		msmc_get_ses_mpax(K2HKLE_MSMC_SEGMENT_ARM, 0, mpax);

	/* Set up EDMA slot 1 configuration */
	slot.opt = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
		   EDMA3_SLOPT_COMP_CODE(0) |
		   EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;
	slot.bcnt = DDR3_EDMA_BCNT;
	slot.acnt = DDR3_EDMA_BLK_SIZE;
	slot.ccnt = DDR3_EDMA_CCNT;
	slot.src_bidx = 0;
	slot.dst_bidx = DDR3_EDMA_BLK_SIZE;
	slot.src_cidx = 0;
	slot.dst_cidx = 0;
	slot.link = EDMA3_PARSET_NULL_LINK;
	slot.bcntrld = 0;
	edma3_slot_configure(KS2_EDMA0_BASE, DDR3_EDMA_SLOT_NUM, &slot);

	/* Configure the QDMA channel */
	edma_channel.slot = DDR3_EDMA_SLOT_NUM;
	edma_channel.chnum = 0;
	edma_channel.complete_code = 0;
	/* Event trigger after dst update */
	edma_channel.trigger_slot_word = EDMA3_TWORD(dst);
	qedma3_start(KS2_EDMA0_BASE, &edma_channel);

	/* DDR3 size is given in GB; convert it to 4 KB MSMC segments */
	seg_num = ddr3_size << (30 - KS2_MSMC_SEG_SIZE_SHIFT);

	for (seg = 0; seg < seg_num; seg += KS2_MSMC_MAP_SEG_NUM) {
		/*
		 * Map a 2 GB window of the 36-bit DDR address space into
		 * the 32-bit space of the EMIF slave interface so that
		 * the EDMA driver can access it
		 */
		if (cpu_is_k2g()) {
			msmc_map_ses_segment(K2G_MSMC_SEGMENT_ARM, 0,
					     base >> KS2_MSMC_SEG_SIZE_SHIFT,
					     KS2_MSMC_DST_SEG_BASE + seg,
					     MPAX_SEG_2G);
		} else {
			msmc_map_ses_segment(K2HKLE_MSMC_SEGMENT_ARM, 0,
					     base >> KS2_MSMC_SEG_SIZE_SHIFT,
					     KS2_MSMC_DST_SEG_BASE + seg,
					     MPAX_SEG_2G);
		}

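		/* Number of 1 KB EDMA blocks remaining in this window */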
		if ((seg_num - seg) > KS2_MSMC_MAP_SEG_NUM)
			edma_blks = KS2_MSMC_MAP_SEG_NUM <<
					(KS2_MSMC_SEG_SIZE_SHIFT
					- DDR3_EDMA_BLK_SIZE_SHIFT);
		else
			edma_blks = (seg_num - seg) <<
					(KS2_MSMC_SEG_SIZE_SHIFT
					- DDR3_EDMA_BLK_SIZE_SHIFT);

		/* Use the EDMA driver to scrub the mapped (up to 2 GB) window */
		for (dst = base, blks = 0; blks < edma_blks;
		     blks += DDR3_EDMA_BCNT, dst += DDR3_EDMA_XF_SIZE) {
			edma3_set_src_addr(KS2_EDMA0_BASE,
					   edma_channel.slot, (u32)edma_src);
			edma3_set_dest_addr(KS2_EDMA0_BASE,
					    edma_channel.slot, (u32)dst);

			while (edma3_check_for_transfer(KS2_EDMA0_BASE,
							&edma_channel))
				udelay(10);
		}
	}

	qedma3_stop(KS2_EDMA0_BASE, &edma_channel);

	/* Restore the SES MPAX regs */
	if (cpu_is_k2g())
		msmc_set_ses_mpax(K2G_MSMC_SEGMENT_ARM, 0, mpax);
	else
		msmc_set_ses_mpax(K2HKLE_MSMC_SEGMENT_ARM, 0, mpax);
}

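/*
 * ddr3_ecc_init_range() - clear ECC address range 1 and switch ECC on
 * (with read-modify-write when the controller supports it) ahead of the
 * memory scrub.
 */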
static void ddr3_ecc_init_range(u32 base)
{
	u32 ecc_val = KS2_DDR3_ECC_EN;
	u32 rmw = ddr3_ecc_support_rmw(base);

	if (rmw)
		ecc_val |= KS2_DDR3_ECC_RMW_EN;

	__raw_writel(0, base + KS2_DDR3_ECC_ADDR_RANGE1_OFFSET);

	ddr3_ecc_config(base, ecc_val);
}

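/*
 * ddr3_enable_ecc() - turn ECC on for normal use, or for an ECC test when
 * 'test' is set (address range 1 checking enabled). Controllers without
 * RMW support keep ECC disabled except under test.
 */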
void ddr3_enable_ecc(u32 base, int test)
{
	u32 ecc_val = KS2_DDR3_ECC_ENABLE;
	u32 rmw = ddr3_ecc_support_rmw(base);

	if (test)
		ecc_val |= KS2_DDR3_ECC_ADDR_RNG_1_EN;

	if (!rmw) {
		if (!test)
			/*
			 * By default, disable ECC when rmw = 0 and
			 * there is no ECC test
			 */
			ecc_val = 0;
	} else {
		ecc_val |= KS2_DDR3_ECC_RMW_EN;
	}

	ddr3_ecc_config(base, ecc_val);
}

void ddr3_disable_ecc(u32 base)
{
	ddr3_ecc_config(base, 0);
}

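/*
 * On K2HK and K2L the DDR3 ECC error interrupt is a CIC2 system interrupt
 * and has to be routed through a CIC channel to reach the ARM GIC.
 */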
#if defined(CONFIG_SOC_K2HK) || defined(CONFIG_SOC_K2L)
static void cic_init(u32 base)
{
	/* Disable CIC global interrupts */
	__raw_writel(0, base + KS2_CIC_GLOBAL_ENABLE);

	/* Set to normal mode, no nesting, no priority hold */
	__raw_writel(0, base + KS2_CIC_CTRL);
	__raw_writel(0, base + KS2_CIC_HOST_CTRL);

	/* Enable CIC global interrupts */
	__raw_writel(1, base + KS2_CIC_GLOBAL_ENABLE);
}

static void cic_map_cic_to_gic(u32 base, u32 chan_num, u32 irq_num)
{
	/* Map the system interrupt to a CIC channel */
	__raw_writeb(chan_num, base + KS2_CIC_CHAN_MAP(0) + irq_num);

	/* Enable CIC system interrupt */
	__raw_writel(irq_num, base + KS2_CIC_SYS_ENABLE_IDX_SET);

	/* Enable CIC Host interrupt */
	__raw_writel(chan_num, base + KS2_CIC_HOST_ENABLE_IDX_SET);
}

static void ddr3_map_ecc_cic2_irq(u32 base)
{
	cic_init(base);
	cic_map_cic_to_gic(base, KS2_CIC2_DDR3_ECC_CHAN_NUM,
			   KS2_CIC2_DDR3_ECC_IRQ_NUM);
}
#endif

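/*
 * ddr3_init_ecc() - top-level ECC bring-up: leave ECC off on controllers
 * without RMW support, otherwise configure the ECC range, scrub all of
 * DDR, route the error interrupt and enable ECC.
 */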
void ddr3_init_ecc(u32 base, u32 ddr3_size)
{
	if (!ddr3_ecc_support_rmw(base)) {
		ddr3_disable_ecc(base);
		return;
	}

	ddr3_ecc_init_range(base);
	ddr3_reset_data(CONFIG_SYS_SDRAM_BASE, ddr3_size);

	/* Map the DDR3 ECC system interrupt from CIC2 to the GIC */
#if defined(CONFIG_SOC_K2HK) || defined(CONFIG_SOC_K2L)
	ddr3_map_ecc_cic2_irq(KS2_CIC2_BASE);
#endif
	ddr3_enable_ecc(base, 0);
}

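/*
 * ddr3_check_ecc_int() - report pending ECC error interrupts. A 2-bit
 * (uncorrectable) error resets the device unless the 'ecc_test'
 * environment variable is set; 1-bit errors are only counted and logged.
 */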
void ddr3_check_ecc_int(u32 base)
{
	char *env;
	int ecc_test = 0;
	u32 value = __raw_readl(base + KS2_DDR3_ECC_INT_STATUS_OFFSET);

	env = env_get("ecc_test");
	if (env)
		ecc_test = simple_strtol(env, NULL, 0);

	if (value & KS2_DDR3_WR_ECC_ERR_SYS)
		puts("DDR3 ECC write error interrupt\n");

	if (value & KS2_DDR3_2B_ECC_ERR_SYS) {
		puts("DDR3 ECC 2-bit error interrupt\n");

		if (!ecc_test) {
			puts("Resetting the device ...\n");
			reset_cpu(0);
		}
	}

	value = __raw_readl(base + KS2_DDR3_ONE_BIT_ECC_ERR_CNT_OFFSET);
	if (value) {
		printf("1-bit ECC err count: 0x%x\n", value);
		value = __raw_readl(base +
				    KS2_DDR3_ONE_BIT_ECC_ERR_ADDR_LOG_OFFSET);
		printf("1-bit ECC err address log: 0x%x\n", value);
	}
}

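/*
 * ddr3_reset_ddrphy() - pulse the DDR3A PHY reset bit in the PLL
 * controller, holding it asserted for 10 us.
 */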
void ddr3_reset_ddrphy(void)
{
	u32 tmp;

	/* Assert DDR3A PHY reset */
	tmp = readl(KS2_DDR3APLLCTL1);
	tmp |= KS2_DDR3_PLLCTRL_PHY_RESET;
	writel(tmp, KS2_DDR3APLLCTL1);

	/* Wait 10 us for the reset to latch */
	udelay(10);

	/* Release DDR3A PHY reset */
	tmp = readl(KS2_DDR3APLLCTL1);
	tmp &= ~KS2_DDR3_PLLCTRL_PHY_RESET;
	writel(tmp, KS2_DDR3APLLCTL1);
}

#ifdef CONFIG_SOC_K2HK
/**
 * ddr3_err_reset_workaround - reset workaround in case a leveling error
 * is detected on PG 1.0 and PG 1.1 K2HK SoCs
 */
void ddr3_err_reset_workaround(void)
{
	unsigned int tmp;
	unsigned int tmp_a;
	unsigned int tmp_b;

	/*
	 * Check the PGSR0 error bits of both DDR3 PHYs to see whether any
	 * of WLERR, QSGERR, WLAERR, RDERR, WDERR, REERR or WEERR is set
	 */
	tmp_a = __raw_readl(KS2_DDR3A_DDRPHYC + KS2_DDRPHY_PGSR0_OFFSET);
	tmp_b = __raw_readl(KS2_DDR3B_DDRPHYC + KS2_DDRPHY_PGSR0_OFFSET);

	if (((tmp_a & 0x0FE00000) != 0) || ((tmp_b & 0x0FE00000) != 0)) {
		printf("DDR Leveling Error Detected!\n");
		printf("DDR3A PGSR0 = 0x%x\n", tmp_a);
		printf("DDR3B PGSR0 = 0x%x\n", tmp_b);

		/*
		 * Write keys to the KICK registers to enable writes to
		 * registers in the boot config space
		 */
		__raw_writel(KS2_KICK0_MAGIC, KS2_KICK0);
		__raw_writel(KS2_KICK1_MAGIC, KS2_KICK1);

		/*
		 * Move the DDR3A module out of reset isolation by setting
		 * MDCTL23[12] = 0
		 */
		tmp_a = __raw_readl(KS2_PSC_BASE +
				    PSC_REG_MDCTL(KS2_LPSC_EMIF4F_DDR3A));

		tmp_a = PSC_REG_MDCTL_SET_RESET_ISO(tmp_a, 0);
		__raw_writel(tmp_a, KS2_PSC_BASE +
			     PSC_REG_MDCTL(KS2_LPSC_EMIF4F_DDR3A));

		/*
		 * Move the DDR3B module out of reset isolation by setting
		 * MDCTL24[12] = 0
		 */
		tmp_b = __raw_readl(KS2_PSC_BASE +
				    PSC_REG_MDCTL(KS2_LPSC_EMIF4F_DDR3B));
		tmp_b = PSC_REG_MDCTL_SET_RESET_ISO(tmp_b, 0);
		__raw_writel(tmp_b, KS2_PSC_BASE +
			     PSC_REG_MDCTL(KS2_LPSC_EMIF4F_DDR3B));

		/*
		 * Write the 0x5A69 key to RSTCTRL[15:0] to unlock writes
		 * to RSTCTRL and RSTCFG
		 */
		tmp = __raw_readl(KS2_RSTCTRL);
		tmp &= KS2_RSTCTRL_MASK;
		tmp |= KS2_RSTCTRL_KEY;
		__raw_writel(tmp, KS2_RSTCTRL);

		/*
		 * Set the PLL controller to drive a hard reset on SW
		 * trigger by setting RSTCFG[13] = 0
		 */
		tmp = __raw_readl(KS2_RSTCTRL_RSCFG);
		tmp &= ~KS2_RSTYPE_PLL_SOFT;
		__raw_writel(tmp, KS2_RSTCTRL_RSCFG);

		reset_cpu(0);
	}
}
#endif
449#endif