blob: cd4234f389ebe4db08f26cea61114464c4fa4cd0 [file] [log] [blame]
Kever Yang6fc9ebf2018-12-20 11:33:42 +08001// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
Kever Yang1a94b9e2017-09-27 16:38:22 +08002/*
3 * (C) Copyright 2017 Rockchip Electronics Co., Ltd
Kever Yang1a94b9e2017-09-27 16:38:22 +08004 */
5#include <common.h>
6#include <clk.h>
7#include <dm.h>
8#include <dt-structs.h>
9#include <errno.h>
Simon Glass97589732020-05-10 11:40:02 -060010#include <init.h>
Kever Yang1a94b9e2017-09-27 16:38:22 +080011#include <ram.h>
12#include <regmap.h>
13#include <syscon.h>
Simon Glass3ba929a2020-10-30 21:38:53 -060014#include <asm/global_data.h>
Kever Yang1a94b9e2017-09-27 16:38:22 +080015#include <asm/io.h>
Kever Yang9fbe17c2019-03-28 11:01:23 +080016#include <asm/arch-rockchip/clock.h>
17#include <asm/arch-rockchip/cru_rk322x.h>
18#include <asm/arch-rockchip/grf_rk322x.h>
19#include <asm/arch-rockchip/hardware.h>
20#include <asm/arch-rockchip/sdram_rk322x.h>
Kever Yang9fbe17c2019-03-28 11:01:23 +080021#include <asm/arch-rockchip/uart.h>
Kever Yange47db832019-11-15 11:04:33 +080022#include <asm/arch-rockchip/sdram.h>
Kever Yang1a94b9e2017-09-27 16:38:22 +080023#include <asm/types.h>
Simon Glassdbd79542020-05-10 11:40:11 -060024#include <linux/delay.h>
Kever Yang1a94b9e2017-09-27 16:38:22 +080025#include <linux/err.h>
26
27DECLARE_GLOBAL_DATA_PTR;
/* Register interfaces of one SDRAM channel used during bring-up */
struct chan_info {
	struct rk322x_ddr_pctl *pctl;	/* protocol controller registers */
	struct rk322x_ddr_phy *phy;	/* DDR PHY registers */
	struct rk322x_service_sys *msch;	/* memory scheduler (NoC) registers */
};
33
/* Driver private data: channel handles plus clock/reset/GRF access */
struct dram_info {
	struct chan_info chan[1];	/* rk322x has a single channel */
	struct ram_info info;	/* base/size reported via the RAM uclass */
	struct clk ddr_clk;	/* DDR PLL clock */
	struct rk322x_cru *cru;	/* clock & reset unit, for PHY/pctl resets */
	struct rk322x_grf *grf;	/* general register file (os_reg, soc_con) */
};
41
/* SDRAM configuration, filled from the device tree or of-platdata */
struct rk322x_sdram_params {
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct dtd_rockchip_rk3228_dmc of_plat;	/* generated DT data */
#endif
	struct rk322x_sdram_channel ch[1];	/* detected channel geometry */
	struct rk322x_pctl_timing pctl_timing;	/* protocol controller timings */
	struct rk322x_phy_timing phy_timing;	/* PHY timings and mode registers */
	struct rk322x_base_params base;	/* dram type, frequency, NoC timings */
	int num_channels;	/* always 1 on this SoC */
	struct regmap *map;	/* ranges: 0 = pctl, 1 = phy */
};
53
Kever Yang956798c2019-04-02 20:41:19 +080054#ifdef CONFIG_TPL_BUILD
/*
 * Supported DRAM address-map configurations; the matching index is
 * written to the MSCH ddrconf register by dram_cfg_rbc().
 *
 * Bit layout of each entry:
 * [7:6] bank(n:n bit bank)
 * [5:4] row(13+n)
 * [3] cs(0:1 cs, 1:2 cs)
 * [2:1] bank(n:n bit bank)
 * [0] col(10+n)
 */
const char ddr_cfg_2_rbc[] = {
	((0 << 6) | (0 << 4) | (0 << 3) | (1 << 2) | 1),
	((0 << 6) | (1 << 4) | (0 << 3) | (1 << 2) | 1),
	((0 << 6) | (2 << 4) | (0 << 3) | (1 << 2) | 1),
	((0 << 6) | (3 << 4) | (0 << 3) | (1 << 2) | 1),
	((0 << 6) | (1 << 4) | (0 << 3) | (1 << 2) | 2),
	((0 << 6) | (2 << 4) | (0 << 3) | (1 << 2) | 2),
	((0 << 6) | (3 << 4) | (0 << 3) | (1 << 2) | 2),
	((0 << 6) | (0 << 4) | (0 << 3) | (1 << 2) | 0),
	((0 << 6) | (1 << 4) | (0 << 3) | (1 << 2) | 0),
	((0 << 6) | (2 << 4) | (0 << 3) | (1 << 2) | 0),
	((0 << 6) | (3 << 4) | (0 << 3) | (1 << 2) | 0),
	((0 << 6) | (2 << 4) | (0 << 3) | (0 << 2) | 1),
	((1 << 6) | (1 << 4) | (0 << 3) | (0 << 2) | 2),
	((1 << 6) | (1 << 4) | (0 << 3) | (0 << 2) | 1),
	((0 << 6) | (3 << 4) | (1 << 3) | (1 << 2) | 1),
	((0 << 6) | (3 << 4) | (1 << 3) | (1 << 2) | 0),
};
80
81static void copy_to_reg(u32 *dest, const u32 *src, u32 n)
82{
83 int i;
84
85 for (i = 0; i < n / sizeof(u32); i++) {
86 writel(*src, dest);
87 src++;
88 dest++;
89 }
90}
91
/*
 * Reset the DDR controller and PHY through the CRU, then de-assert the
 * PHY-internal soft resets. The assert/de-assert ordering and delays
 * follow the required power-up sequence and must not be reordered.
 */
void phy_pctrl_reset(struct rk322x_cru *cru,
		     struct rk322x_ddr_phy *ddr_phy)
{
	/* assert presetn/sresetn for both controller and PHY */
	rk_clrsetreg(&cru->cru_softrst_con[5], 1 << DDRCTRL_PSRST_SHIFT |
		     1 << DDRCTRL_SRST_SHIFT | 1 << DDRPHY_PSRST_SHIFT |
		     1 << DDRPHY_SRST_SHIFT,
		     1 << DDRCTRL_PSRST_SHIFT | 1 << DDRCTRL_SRST_SHIFT |
		     1 << DDRPHY_PSRST_SHIFT | 1 << DDRPHY_SRST_SHIFT);

	udelay(10);

	/* release the PHY resets first ... */
	rk_clrreg(&cru->cru_softrst_con[5], 1 << DDRPHY_PSRST_SHIFT |
		  1 << DDRPHY_SRST_SHIFT);
	udelay(10);

	/* ... then the controller resets */
	rk_clrreg(&cru->cru_softrst_con[5], 1 << DDRCTRL_PSRST_SHIFT |
		  1 << DDRCTRL_SRST_SHIFT);
	udelay(10);

	/* assert the PHY-internal soft reset (analog + digital) */
	clrbits_le32(&ddr_phy->ddrphy_reg[0],
		     SOFT_RESET_MASK << SOFT_RESET_SHIFT);
	udelay(10);
	/* de-assert analog logic reset before digital logic reset */
	setbits_le32(&ddr_phy->ddrphy_reg[0],
		     SOFT_DERESET_ANALOG);
	udelay(5);
	setbits_le32(&ddr_phy->ddrphy_reg[0],
		     SOFT_DERESET_DIGITAL);

	udelay(1);
}
122
/*
 * Configure the PHY DLL bypass mode according to the DDR frequency
 * (in MHz). Register indices are raw PHY register offsets; their
 * meanings are not documented here — presumably 0x13/0x26/... select
 * per-byte-lane DLL bypass and 0xa4 the low-frequency mode (TODO:
 * confirm against the PHY datasheet).
 */
void phy_dll_bypass_set(struct rk322x_ddr_phy *ddr_phy, u32 freq)
{
	u32 tmp;

	/* per-lane DLL bypass enable bits */
	setbits_le32(&ddr_phy->ddrphy_reg[0x13], 0x10);
	setbits_le32(&ddr_phy->ddrphy_reg[0x26], 0x10);
	setbits_le32(&ddr_phy->ddrphy_reg[0x36], 0x10);
	setbits_le32(&ddr_phy->ddrphy_reg[0x46], 0x10);
	setbits_le32(&ddr_phy->ddrphy_reg[0x56], 0x10);

	clrbits_le32(&ddr_phy->ddrphy_reg[0x14], 0x8);
	clrbits_le32(&ddr_phy->ddrphy_reg[0x27], 0x8);
	clrbits_le32(&ddr_phy->ddrphy_reg[0x37], 0x8);
	clrbits_le32(&ddr_phy->ddrphy_reg[0x47], 0x8);
	clrbits_le32(&ddr_phy->ddrphy_reg[0x57], 0x8);

	/* low-frequency setting below 400 MHz */
	if (freq <= 400)
		setbits_le32(&ddr_phy->ddrphy_reg[0xa4], 0x1f);
	else
		clrbits_le32(&ddr_phy->ddrphy_reg[0xa4], 0x1f);

	/* per-lane timing select: 3 up to 680 MHz, 2 above */
	if (freq <= 680)
		tmp = 3;
	else
		tmp = 2;

	writel(tmp, &ddr_phy->ddrphy_reg[0x28]);
	writel(tmp, &ddr_phy->ddrphy_reg[0x38]);
	writel(tmp, &ddr_phy->ddrphy_reg[0x48]);
	writel(tmp, &ddr_phy->ddrphy_reg[0x58]);
}
154
155static void send_command(struct rk322x_ddr_pctl *pctl,
156 u32 rank, u32 cmd, u32 arg)
157{
158 writel((START_CMD | (rank << 20) | arg | cmd), &pctl->mcmd);
Kever Yangde710212019-07-09 22:00:22 +0800159 udelay(1);
Kever Yang1a94b9e2017-09-27 16:38:22 +0800160 while (readl(&pctl->mcmd) & START_CMD)
161 ;
162}
163
/*
 * Run the JEDEC initialization command sequence for the detected DRAM
 * type: mode-register writes plus ZQ calibration for DDR3, or the
 * reset / ZQ-init / mode-register sequence for LPDDR2/LPDDR3. The
 * command order and delays are mandated by the init sequence and must
 * be preserved.
 */
static void memory_init(struct chan_info *chan,
			struct rk322x_sdram_params *sdram_params)
{
	struct rk322x_ddr_pctl *pctl = chan->pctl;
	u32 dramtype = sdram_params->base.dramtype;

	if (dramtype == DDR3) {
		send_command(pctl, 3, DESELECT_CMD, 0);
		udelay(1);
		send_command(pctl, 3, PREA_CMD, 0);
		/* MR2, MR3, MR1, then MR0 with DLL reset */
		send_command(pctl, 3, MRS_CMD,
			     (0x02 & BANK_ADDR_MASK) << BANK_ADDR_SHIFT |
			     (sdram_params->phy_timing.mr[2] & CMD_ADDR_MASK) <<
			     CMD_ADDR_SHIFT);

		send_command(pctl, 3, MRS_CMD,
			     (0x03 & BANK_ADDR_MASK) << BANK_ADDR_SHIFT |
			     (sdram_params->phy_timing.mr[3] & CMD_ADDR_MASK) <<
			     CMD_ADDR_SHIFT);

		send_command(pctl, 3, MRS_CMD,
			     (0x01 & BANK_ADDR_MASK) << BANK_ADDR_SHIFT |
			     (sdram_params->phy_timing.mr[1] & CMD_ADDR_MASK) <<
			     CMD_ADDR_SHIFT);

		send_command(pctl, 3, MRS_CMD,
			     (0x00 & BANK_ADDR_MASK) << BANK_ADDR_SHIFT |
			     ((sdram_params->phy_timing.mr[0] |
			       DDR3_DLL_RESET) &
			      CMD_ADDR_MASK) << CMD_ADDR_SHIFT);

		/* long ZQ calibration */
		send_command(pctl, 3, ZQCL_CMD, 0);
	} else {
		/* LPDDR2/3: MR63 = reset */
		send_command(pctl, 3, MRS_CMD,
			     (0x63 & LPDDR23_MA_MASK) << LPDDR23_MA_SHIFT |
			     (0 & LPDDR23_OP_MASK) <<
			     LPDDR23_OP_SHIFT);
		udelay(10);
		/* MR16 = 0xff: ZQ init calibration (issued twice) */
		send_command(pctl, 3, MRS_CMD,
			     (0x10 & LPDDR23_MA_MASK) << LPDDR23_MA_SHIFT |
			     (0xff & LPDDR23_OP_MASK) <<
			     LPDDR23_OP_SHIFT);
		udelay(1);
		send_command(pctl, 3, MRS_CMD,
			     (0x10 & LPDDR23_MA_MASK) << LPDDR23_MA_SHIFT |
			     (0xff & LPDDR23_OP_MASK) <<
			     LPDDR23_OP_SHIFT);
		udelay(1);
		/* MR1..MR3 from the DT-provided mode register values */
		send_command(pctl, 3, MRS_CMD,
			     (1 & LPDDR23_MA_MASK) << LPDDR23_MA_SHIFT |
			     (sdram_params->phy_timing.mr[1] &
			      LPDDR23_OP_MASK) << LPDDR23_OP_SHIFT);
		send_command(pctl, 3, MRS_CMD,
			     (2 & LPDDR23_MA_MASK) << LPDDR23_MA_SHIFT |
			     (sdram_params->phy_timing.mr[2] &
			      LPDDR23_OP_MASK) << LPDDR23_OP_SHIFT);
		send_command(pctl, 3, MRS_CMD,
			     (3 & LPDDR23_MA_MASK) << LPDDR23_MA_SHIFT |
			     (sdram_params->phy_timing.mr[3] &
			      LPDDR23_OP_MASK) << LPDDR23_OP_SHIFT);
		/* MR11 (ODT) exists only on LPDDR3 */
		if (dramtype == LPDDR3)
			send_command(pctl, 3, MRS_CMD, (11 & LPDDR23_MA_MASK) <<
				     LPDDR23_MA_SHIFT |
				     (sdram_params->phy_timing.mr11 &
				      LPDDR23_OP_MASK) << LPDDR23_OP_SHIFT);
	}
}
231
232static u32 data_training(struct chan_info *chan)
233{
234 struct rk322x_ddr_phy *ddr_phy = chan->phy;
235 struct rk322x_ddr_pctl *pctl = chan->pctl;
236 u32 value;
237 u32 bw = (readl(&ddr_phy->ddrphy_reg[0]) >> 4) & 0xf;
238 u32 ret;
239
240 /* disable auto refresh */
241 value = readl(&pctl->trefi) | (1 << 31);
242 writel(1 << 31, &pctl->trefi);
243
244 clrsetbits_le32(&ddr_phy->ddrphy_reg[2], 0x30,
245 DQS_SQU_CAL_SEL_CS0);
246 setbits_le32(&ddr_phy->ddrphy_reg[2], DQS_SQU_CAL_START);
247
Kever Yangde710212019-07-09 22:00:22 +0800248 udelay(30);
Kever Yang1a94b9e2017-09-27 16:38:22 +0800249 ret = readl(&ddr_phy->ddrphy_reg[0xff]);
250
251 clrbits_le32(&ddr_phy->ddrphy_reg[2],
252 DQS_SQU_CAL_START);
253
254 /*
255 * since data training will take about 20us, so send some auto
256 * refresh(about 7.8us) to complement the lost time
257 */
258 send_command(pctl, 3, PREA_CMD, 0);
259 send_command(pctl, 3, REF_CMD, 0);
260
261 writel(value, &pctl->trefi);
262
263 if (ret & 0x10) {
264 ret = -1;
265 } else {
266 ret = (ret & 0xf) ^ bw;
267 ret = (ret == 0) ? 0 : -1;
268 }
269 return ret;
270}
271
/*
 * Drive the controller state machine to CONFIG, stepping through the
 * intermediate states (LOW_POWER must be woken to ACCESS first). Loops
 * until the stat register reports CONFIG.
 */
static void move_to_config_state(struct rk322x_ddr_pctl *pctl)
{
	unsigned int state;

	while (1) {
		state = readl(&pctl->stat) & PCTL_STAT_MASK;
		switch (state) {
		case LOW_POWER:
			writel(WAKEUP_STATE, &pctl->sctl);
			while ((readl(&pctl->stat) & PCTL_STAT_MASK)
				!= ACCESS)
				;
			/*
			 * If at low power state, need wakeup first, and then
			 * enter the config, so fallthrough
			 */
		case ACCESS:
			/* fallthrough */
		case INIT_MEM:
			writel(CFG_STATE, &pctl->sctl);
			while ((readl(&pctl->stat) & PCTL_STAT_MASK) != CONFIG)
				;
			break;
		case CONFIG:
			/* already there */
			return;
		default:
			break;
		}
	}
}
302
/*
 * Drive the controller state machine to ACCESS (normal operation).
 * INIT_MEM goes via CONFIG, LOW_POWER is woken directly; loops until
 * the stat register reports ACCESS.
 */
static void move_to_access_state(struct rk322x_ddr_pctl *pctl)
{
	unsigned int state;

	while (1) {
		state = readl(&pctl->stat) & PCTL_STAT_MASK;
		switch (state) {
		case LOW_POWER:
			writel(WAKEUP_STATE, &pctl->sctl);
			while ((readl(&pctl->stat) & PCTL_STAT_MASK) != ACCESS)
				;
			break;
		case INIT_MEM:
			writel(CFG_STATE, &pctl->sctl);
			while ((readl(&pctl->stat) & PCTL_STAT_MASK) != CONFIG)
				;
			/* fallthrough */
		case CONFIG:
			writel(GO_STATE, &pctl->sctl);
			while ((readl(&pctl->stat) & PCTL_STAT_MASK) != ACCESS)
				;
			break;
		case ACCESS:
			/* already there */
			return;
		default:
			break;
		}
	}
}
332
/*
 * Drive the controller state machine to LOW_POWER, stepping
 * INIT_MEM -> CONFIG -> ACCESS -> LOW_POWER as needed; loops until the
 * stat register reports LOW_POWER.
 */
static void move_to_lowpower_state(struct rk322x_ddr_pctl *pctl)
{
	unsigned int state;

	while (1) {
		state = readl(&pctl->stat) & PCTL_STAT_MASK;
		switch (state) {
		case INIT_MEM:
			writel(CFG_STATE, &pctl->sctl);
			while ((readl(&pctl->stat) & PCTL_STAT_MASK) != CONFIG)
				;
			/* fallthrough */
		case CONFIG:
			writel(GO_STATE, &pctl->sctl);
			while ((readl(&pctl->stat) & PCTL_STAT_MASK) != ACCESS)
				;
			break;
		case ACCESS:
			writel(SLEEP_STATE, &pctl->sctl);
			while ((readl(&pctl->stat) & PCTL_STAT_MASK) !=
			       LOW_POWER)
				;
			break;
		case LOW_POWER:
			/* already there */
			return;
		default:
			break;
		}
	}
}
363
/* pctl should in low power mode when call this function */
static void phy_softreset(struct dram_info *dram)
{
	struct rk322x_ddr_phy *ddr_phy = dram->chan[0].phy;
	struct rk322x_grf *grf = dram->grf;

	/* enable the core buffer while the PHY is reset */
	writel(GRF_DDRPHY_BUFFEREN_CORE_EN, &grf->soc_con[0]);
	/* assert both PHY soft resets (reg 0 bits [3:2]) */
	clrbits_le32(&ddr_phy->ddrphy_reg[0], 0x3 << 2);
	udelay(1);
	/* de-assert analog reset first, then digital */
	setbits_le32(&ddr_phy->ddrphy_reg[0], 1 << 2);
	udelay(5);
	setbits_le32(&ddr_phy->ddrphy_reg[0], 1 << 3);
	writel(GRF_DDRPHY_BUFFEREN_CORE_DIS, &grf->soc_con[0]);
}
378
/* bw: 2: 32bit, 1:16bit */
static void set_bw(struct dram_info *dram, u32 bw)
{
	struct rk322x_ddr_pctl *pctl = dram->chan[0].pctl;
	struct rk322x_ddr_phy *ddr_phy = dram->chan[0].phy;
	struct rk322x_grf *grf = dram->grf;

	if (bw == 1) {
		/* 16-bit: partial-population mode, disable upper lanes */
		setbits_le32(&pctl->ppcfg, 1);
		clrbits_le32(&ddr_phy->ddrphy_reg[0], 0xc << 4);
		writel(GRF_MSCH_NOC_16BIT_EN, &grf->soc_con[0]);
		clrbits_le32(&ddr_phy->ddrphy_reg[0x46], 0x8);
		clrbits_le32(&ddr_phy->ddrphy_reg[0x56], 0x8);
	} else {
		/* 32-bit: all four byte lanes active */
		clrbits_le32(&pctl->ppcfg, 1);
		setbits_le32(&ddr_phy->ddrphy_reg[0], 0xf << 4);
		writel(GRF_DDR_32BIT_EN | GRF_MSCH_NOC_32BIT_EN,
		       &grf->soc_con[0]);
		setbits_le32(&ddr_phy->ddrphy_reg[0x46], 0x8);
		setbits_le32(&ddr_phy->ddrphy_reg[0x56], 0x8);
	}
}
401
/*
 * Program the protocol controller: DFI interface config, the timing
 * block copied from DT, and the DRAM-type dependent mcfg/ODT/latency
 * registers. Also selects the DRAM type and bus width in the GRF.
 */
static void pctl_cfg(struct rk322x_ddr_pctl *pctl,
		     struct rk322x_sdram_params *sdram_params,
		     struct rk322x_grf *grf)
{
	u32 burst_len;
	u32 bw;
	u32 dramtype = sdram_params->base.dramtype;

	/* GRF bus-width bits matching the detected channel width */
	if (sdram_params->ch[0].bw == 2)
		bw = GRF_DDR_32BIT_EN | GRF_MSCH_NOC_32BIT_EN;
	else
		bw = GRF_MSCH_NOC_16BIT_EN;

	/* DFI interface setup */
	writel(DFI_INIT_START | DFI_DATA_BYTE_DISABLE_EN, &pctl->dfistcfg0);
	writel(DFI_DRAM_CLK_SR_EN | DFI_DRAM_CLK_DPD_EN, &pctl->dfistcfg1);
	writel(DFI_PARITY_INTR_EN | DFI_PARITY_EN, &pctl->dfistcfg2);
	writel(0x51010, &pctl->dfilpcfg0);

	writel(1, &pctl->dfitphyupdtype0);
	writel(0x0d, &pctl->dfitphyrdlat);
	writel(0, &pctl->dfitphywrdata);

	writel(0, &pctl->dfiupdcfg);
	/* copy the whole DT timing struct into the timing registers */
	copy_to_reg(&pctl->togcnt1u, &sdram_params->pctl_timing.togcnt1u,
		    sizeof(struct rk322x_pctl_timing));
	if (dramtype == DDR3) {
		writel((1 << 3) | (1 << 11),
		       &pctl->dfiodtcfg);
		writel(7 << 16, &pctl->dfiodtcfg1);
		/* read/write latencies derived from CL/CWL */
		writel((readl(&pctl->tcl) - 1) / 2 - 1, &pctl->dfitrddataen);
		writel((readl(&pctl->tcwl) - 1) / 2 - 1, &pctl->dfitphywrlat);
		writel(500, &pctl->trsth);
		writel(0 << MDDR_LPDDR2_CLK_STOP_IDLE_SHIFT | DDR3_EN |
		       DDR2_DDR3_BL_8 | (6 - 4) << TFAW_SHIFT | PD_EXIT_SLOW |
		       1 << PD_TYPE_SHIFT | 0 << PD_IDLE_SHIFT,
		       &pctl->mcfg);
		writel(bw | GRF_DDR3_EN, &grf->soc_con[0]);
	} else {
		if (sdram_params->phy_timing.bl & PHT_BL_8)
			burst_len = MDDR_LPDDR2_BL_8;
		else
			burst_len = MDDR_LPDDR2_BL_4;

		writel(readl(&pctl->tcl) / 2 - 1, &pctl->dfitrddataen);
		writel(readl(&pctl->tcwl) / 2 - 1, &pctl->dfitphywrlat);
		writel(0, &pctl->trsth);
		if (dramtype == LPDDR2) {
			writel(0 << MDDR_LPDDR2_CLK_STOP_IDLE_SHIFT |
			       LPDDR2_S4 | LPDDR2_EN | burst_len |
			       (6 - 4) << TFAW_SHIFT | PD_EXIT_FAST |
			       1 << PD_TYPE_SHIFT | 0 << PD_IDLE_SHIFT,
			       &pctl->mcfg);
			/* LPDDR2 runs without ODT */
			writel(0, &pctl->dfiodtcfg);
			writel(0, &pctl->dfiodtcfg1);
		} else {
			writel(0 << MDDR_LPDDR2_CLK_STOP_IDLE_SHIFT |
			       LPDDR2_S4 | LPDDR3_EN | burst_len |
			       (6 - 4) << TFAW_SHIFT | PD_EXIT_FAST |
			       1 << PD_TYPE_SHIFT | 0 << PD_IDLE_SHIFT,
			       &pctl->mcfg);
			writel((1 << 3) | (1 << 2), &pctl->dfiodtcfg);
			writel((7 << 16) | 4, &pctl->dfiodtcfg1);
		}
		writel(bw | GRF_LPDDR2_3_EN, &grf->soc_con[0]);
	}
	/* lock the configuration */
	setbits_le32(&pctl->scfg, 1);
}
469
/*
 * Program the NoC (msch) timing registers and the PHY: DRAM type,
 * latencies, drive strengths and DQS ODT for all byte lanes.
 */
static void phy_cfg(struct chan_info *chan,
		    struct rk322x_sdram_params *sdram_params)
{
	struct rk322x_ddr_phy *ddr_phy = chan->phy;
	struct rk322x_service_sys *axi_bus = chan->msch;
	struct rk322x_msch_timings *noc_timing = &sdram_params->base.noc_timing;
	struct rk322x_phy_timing *phy_timing = &sdram_params->phy_timing;
	struct rk322x_pctl_timing *pctl_timing = &sdram_params->pctl_timing;
	u32 cmd_drv, clk_drv, dqs_drv, dqs_odt;

	/* memory scheduler timings from DT */
	writel(noc_timing->ddrtiming, &axi_bus->ddrtiming);
	writel(noc_timing->ddrmode, &axi_bus->ddrmode);
	writel(noc_timing->readlatency, &axi_bus->readlatency);
	writel(noc_timing->activate, &axi_bus->activate);
	writel(noc_timing->devtodev, &axi_bus->devtodev);

	/* DRAM type + burst length; LPDDR3 uses the LPDDR2 setting */
	switch (sdram_params->base.dramtype) {
	case DDR3:
		writel(PHY_DDR3 | phy_timing->bl, &ddr_phy->ddrphy_reg[1]);
		break;
	case LPDDR2:
		writel(PHY_LPDDR2 | phy_timing->bl, &ddr_phy->ddrphy_reg[1]);
		break;
	default:
		writel(PHY_LPDDR2 | phy_timing->bl, &ddr_phy->ddrphy_reg[1]);
		break;
	}

	writel(phy_timing->cl_al, &ddr_phy->ddrphy_reg[0xb]);
	writel(pctl_timing->tcwl, &ddr_phy->ddrphy_reg[0xc]);

	/* drive strength / termination selection */
	cmd_drv = PHY_RON_RTT_34OHM;
	clk_drv = PHY_RON_RTT_45OHM;
	dqs_drv = PHY_RON_RTT_34OHM;
	if (sdram_params->base.dramtype == LPDDR2)
		dqs_odt = PHY_RON_RTT_DISABLE;
	else
		dqs_odt = PHY_RON_RTT_225OHM;

	/* command and clock lanes */
	writel(cmd_drv, &ddr_phy->ddrphy_reg[0x11]);
	clrsetbits_le32(&ddr_phy->ddrphy_reg[0x12], (0x1f << 3), cmd_drv << 3);
	writel(clk_drv, &ddr_phy->ddrphy_reg[0x16]);
	writel(clk_drv, &ddr_phy->ddrphy_reg[0x18]);

	/* DQS drive strength, one pair of registers per byte lane */
	writel(dqs_drv, &ddr_phy->ddrphy_reg[0x20]);
	writel(dqs_drv, &ddr_phy->ddrphy_reg[0x2f]);
	writel(dqs_drv, &ddr_phy->ddrphy_reg[0x30]);
	writel(dqs_drv, &ddr_phy->ddrphy_reg[0x3f]);
	writel(dqs_drv, &ddr_phy->ddrphy_reg[0x40]);
	writel(dqs_drv, &ddr_phy->ddrphy_reg[0x4f]);
	writel(dqs_drv, &ddr_phy->ddrphy_reg[0x50]);
	writel(dqs_drv, &ddr_phy->ddrphy_reg[0x5f]);

	/* DQS ODT, same per-lane layout */
	writel(dqs_odt, &ddr_phy->ddrphy_reg[0x21]);
	writel(dqs_odt, &ddr_phy->ddrphy_reg[0x2e]);
	writel(dqs_odt, &ddr_phy->ddrphy_reg[0x31]);
	writel(dqs_odt, &ddr_phy->ddrphy_reg[0x3e]);
	writel(dqs_odt, &ddr_phy->ddrphy_reg[0x41]);
	writel(dqs_odt, &ddr_phy->ddrphy_reg[0x4e]);
	writel(dqs_odt, &ddr_phy->ddrphy_reg[0x51]);
	writel(dqs_odt, &ddr_phy->ddrphy_reg[0x5e]);
}
532
/*
 * Pick the MSCH address-map index (ddrconf) matching the detected
 * row/bank/col geometry and write it while the controller is in the
 * CONFIG state. Entries 14/15 handle the special dual-rank case,
 * 0..10 the row-first encoding, 11..13 the bank-first encoding;
 * falls back to index 0 when nothing matches.
 */
void dram_cfg_rbc(struct chan_info *chan,
		  struct rk322x_sdram_params *sdram_params)
{
	char noc_config;
	int i = 0;
	struct rk322x_sdram_channel *config = &sdram_params->ch[0];
	struct rk322x_service_sys *axi_bus = chan->msch;

	move_to_config_state(chan->pctl);

	/* dual rank with equal row counts uses dedicated entries */
	if ((config->rank == 2) && (config->cs1_row == config->cs0_row)) {
		if ((config->col + config->bw) == 12) {
			i = 14;
			goto finish;
		} else if ((config->col + config->bw) == 11) {
			i = 15;
			goto finish;
		}
	}
	/* row-first encoding: entries 0..10 */
	noc_config = ((config->cs0_row - 13) << 4) | ((config->bk - 2) << 2) |
		     (config->col + config->bw - 11);
	for (i = 0; i < 11; i++) {
		if (noc_config == ddr_cfg_2_rbc[i])
			break;
	}

	if (i < 11)
		goto finish;

	/* bank-first encoding: entries 11..13 */
	noc_config = ((config->bk - 2) << 6) | ((config->cs0_row - 13) << 4) |
		     (config->col + config->bw - 11);

	for (i = 11; i < 14; i++) {
		if (noc_config == ddr_cfg_2_rbc[i])
			break;
	}
	if (i < 14)
		goto finish;
	else
		i = 0;	/* no match: default map */

finish:
	writel(i, &axi_bus->ddrconf);
	move_to_access_state(chan->pctl);
}
578
579static void dram_all_config(const struct dram_info *dram,
580 struct rk322x_sdram_params *sdram_params)
581{
582 struct rk322x_sdram_channel *info = &sdram_params->ch[0];
583 u32 sys_reg = 0;
584
585 sys_reg |= sdram_params->base.dramtype << SYS_REG_DDRTYPE_SHIFT;
586 sys_reg |= (1 - 1) << SYS_REG_NUM_CH_SHIFT;
587 sys_reg |= info->row_3_4 << SYS_REG_ROW_3_4_SHIFT(0);
588 sys_reg |= 1 << SYS_REG_CHINFO_SHIFT(0);
589 sys_reg |= (info->rank - 1) << SYS_REG_RANK_SHIFT(0);
590 sys_reg |= (info->col - 9) << SYS_REG_COL_SHIFT(0);
591 sys_reg |= info->bk == 3 ? 0 : 1 << SYS_REG_BK_SHIFT(0);
592 sys_reg |= (info->cs0_row - 13) << SYS_REG_CS0_ROW_SHIFT(0);
593 sys_reg |= (info->cs1_row - 13) << SYS_REG_CS1_ROW_SHIFT(0);
594 sys_reg |= (2 >> info->bw) << SYS_REG_BW_SHIFT(0);
595 sys_reg |= (2 >> info->dbw) << SYS_REG_DBW_SHIFT(0);
596
597 writel(sys_reg, &dram->grf->os_reg[2]);
598}
599
600#define TEST_PATTEN 0x5aa5f00f
601
/*
 * Probe the SDRAM geometry by writing/reading test patterns: bus width
 * via data training, then column, row and rank by aliasing detection.
 * Fills in sdram_params->ch[0]; returns 0 on success, -EINVAL when
 * width/column/row detection fails.
 */
static int dram_cap_detect(struct dram_info *dram,
			   struct rk322x_sdram_params *sdram_params)
{
	u32 bw, row, col, addr;
	u32 ret = 0;
	struct rk322x_service_sys *axi_bus = dram->chan[0].msch;

	/* device bus width: DDR3 devices are x8, LPDDR2/3 x16 here */
	if (sdram_params->base.dramtype == DDR3)
		sdram_params->ch[0].dbw = 1;
	else
		sdram_params->ch[0].dbw = 2;

	move_to_config_state(dram->chan[0].pctl);
	/* bw detect: try 32-bit first, fall back to 16-bit */
	set_bw(dram, 2);
	if (data_training(&dram->chan[0]) == 0) {
		bw = 2;
	} else {
		bw = 1;
		set_bw(dram, 1);
		/* PHY must be soft-reset (in low power) before retrying */
		move_to_lowpower_state(dram->chan[0].pctl);
		phy_softreset(dram);
		move_to_config_state(dram->chan[0].pctl);
		if (data_training(&dram->chan[0])) {
			printf("BW detect error\n");
			ret = -EINVAL;
		}
	}
	sdram_params->ch[0].bw = bw;
	sdram_params->ch[0].bk = 3;

	/* widest temporary address map for the pattern tests */
	if (bw == 2)
		writel(6, &axi_bus->ddrconf);
	else
		writel(3, &axi_bus->ddrconf);
	move_to_access_state(dram->chan[0].pctl);
	/* column detect: look for the largest non-aliasing column count */
	for (col = 11; col >= 9; col--) {
		writel(0, CONFIG_SYS_SDRAM_BASE);
		addr = CONFIG_SYS_SDRAM_BASE +
			(1 << (col + bw - 1));
		writel(TEST_PATTEN, addr);
		if ((readl(addr) == TEST_PATTEN) &&
		    (readl(CONFIG_SYS_SDRAM_BASE) == 0))
			break;
	}
	/* loop fell through: col decremented to 8 without a match */
	if (col == 8) {
		printf("Col detect error\n");
		ret = -EINVAL;
		goto out;
	} else {
		sdram_params->ch[0].col = col;
	}

	writel(10, &axi_bus->ddrconf);

	/* Detect row*/
	for (row = 16; row >= 12; row--) {
		writel(0, CONFIG_SYS_SDRAM_BASE);
		addr = CONFIG_SYS_SDRAM_BASE + (1u << (row + 11 + 3 - 1));
		writel(TEST_PATTEN, addr);
		if ((readl(addr) == TEST_PATTEN) &&
		    (readl(CONFIG_SYS_SDRAM_BASE) == 0))
			break;
	}
	if (row == 11) {
		printf("Row detect error\n");
		ret = -EINVAL;
	} else {
		sdram_params->ch[0].cs1_row = row;
		sdram_params->ch[0].row_3_4 = 0;
		sdram_params->ch[0].cs0_row = row;
	}
	/* cs detect: does a write at +1GiB land in a second rank? */
	writel(0, CONFIG_SYS_SDRAM_BASE);
	writel(TEST_PATTEN, CONFIG_SYS_SDRAM_BASE + (1u << 30));
	writel(~TEST_PATTEN, CONFIG_SYS_SDRAM_BASE + (1u << 30) + 4);
	if ((readl(CONFIG_SYS_SDRAM_BASE + (1u << 30)) == TEST_PATTEN) &&
	    (readl(CONFIG_SYS_SDRAM_BASE) == 0))
		sdram_params->ch[0].rank = 2;
	else
		sdram_params->ch[0].rank = 1;
out:
	return ret;
}
686
/*
 * Full SDRAM bring-up: set the DDR clock, reset controller and PHY,
 * program both, run the power-up and memory init sequences, then
 * detect the capacity and record the final configuration. The call
 * order follows the required init sequence and must be preserved.
 * Returns 0 on success or a negative error code.
 */
static int sdram_init(struct dram_info *dram,
		      struct rk322x_sdram_params *sdram_params)
{
	int ret;

	/* DDR PLL runs at twice the DRAM data rate setting */
	ret = clk_set_rate(&dram->ddr_clk,
			   sdram_params->base.ddr_freq * MHz * 2);
	if (ret < 0) {
		printf("Could not set DDR clock\n");
		return ret;
	}

	phy_pctrl_reset(dram->cru, dram->chan[0].phy);
	phy_dll_bypass_set(dram->chan[0].phy, sdram_params->base.ddr_freq);
	pctl_cfg(dram->chan[0].pctl, sdram_params, dram->grf);
	phy_cfg(&dram->chan[0], sdram_params);
	/* start DFI init and wait for completion */
	writel(POWER_UP_START, &dram->chan[0].pctl->powctl);
	while (!(readl(&dram->chan[0].pctl->powstat) & POWER_UP_DONE))
		;
	memory_init(&dram->chan[0], sdram_params);
	move_to_access_state(dram->chan[0].pctl);
	ret = dram_cap_detect(dram, sdram_params);
	if (ret)
		goto out;
	dram_cfg_rbc(&dram->chan[0], sdram_params);
	dram_all_config(dram, sdram_params);
out:
	return ret;
}
716
/*
 * Read the pctl/phy timing tables and base parameters from the device
 * tree into the platform data, and map the pctl+phy register ranges.
 * A no-op (returns 0) when the build has no real DT (OF_PLATDATA uses
 * conv_of_plat() instead). Returns 0 or a negative error code.
 */
static int rk322x_dmc_of_to_plat(struct udevice *dev)
{
	struct rk322x_sdram_params *params = dev_get_plat(dev);
	const void *blob = gd->fdt_blob;
	int node = dev_of_offset(dev);
	int ret;

	if (!CONFIG_IS_ENABLED(OF_REAL))
		return 0;

	params->num_channels = 1;

	/* each property is a flat u32 array matching the C struct layout */
	ret = fdtdec_get_int_array(blob, node, "rockchip,pctl-timing",
				   (u32 *)&params->pctl_timing,
				   sizeof(params->pctl_timing) / sizeof(u32));
	if (ret) {
		printf("%s: Cannot read rockchip,pctl-timing\n", __func__);
		return -EINVAL;
	}
	ret = fdtdec_get_int_array(blob, node, "rockchip,phy-timing",
				   (u32 *)&params->phy_timing,
				   sizeof(params->phy_timing) / sizeof(u32));
	if (ret) {
		printf("%s: Cannot read rockchip,phy-timing\n", __func__);
		return -EINVAL;
	}
	ret = fdtdec_get_int_array(blob, node, "rockchip,sdram-params",
				   (u32 *)&params->base,
				   sizeof(params->base) / sizeof(u32));
	if (ret) {
		printf("%s: Cannot read rockchip,sdram-params\n", __func__);
		return -EINVAL;
	}
	ret = regmap_init_mem(dev_ofnode(dev), &params->map);
	if (ret)
		return ret;

	return 0;
}
Kever Yang956798c2019-04-02 20:41:19 +0800756#endif /* CONFIG_TPL_BUILD */
Kever Yang1a94b9e2017-09-27 16:38:22 +0800757
758#if CONFIG_IS_ENABLED(OF_PLATDATA)
Simon Glassb75b15b2020-12-03 16:55:23 -0700759static int conv_of_plat(struct udevice *dev)
Kever Yang1a94b9e2017-09-27 16:38:22 +0800760{
Simon Glassfa20e932020-12-03 16:55:20 -0700761 struct rk322x_sdram_params *plat = dev_get_plat(dev);
Kever Yang1a94b9e2017-09-27 16:38:22 +0800762 struct dtd_rockchip_rk322x_dmc *of_plat = &plat->of_plat;
763 int ret;
764
765 memcpy(&plat->pctl_timing, of_plat->rockchip_pctl_timing,
766 sizeof(plat->pctl_timing));
767 memcpy(&plat->phy_timing, of_plat->rockchip_phy_timing,
768 sizeof(plat->phy_timing));
769 memcpy(&plat->base, of_plat->rockchip_sdram_params, sizeof(plat->base));
770
771 plat->num_channels = 1;
Simon Glassb75b15b2020-12-03 16:55:23 -0700772 ret = regmap_init_mem_plat(dev, of_plat->reg,
773 ARRAY_SIZE(of_plat->reg) / 2, &plat->map);
Kever Yang1a94b9e2017-09-27 16:38:22 +0800774 if (ret)
775 return ret;
776
777 return 0;
778}
779#endif
780
/*
 * Probe the DMC. In the TPL build this performs the actual SDRAM
 * initialization (register maps, DDR clock, sdram_init()); in later
 * stages it only reads back base/size from the GRF os_reg written by
 * dram_all_config() during TPL.
 */
static int rk322x_dmc_probe(struct udevice *dev)
{
#ifdef CONFIG_TPL_BUILD
	struct rk322x_sdram_params *plat = dev_get_plat(dev);
	int ret;
	struct udevice *dev_clk;
#endif
	struct dram_info *priv = dev_get_priv(dev);

	priv->grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF);
#ifdef CONFIG_TPL_BUILD
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	/* with of-platdata the DT data must be converted manually */
	ret = conv_of_plat(dev);
	if (ret)
		return ret;
#endif

	priv->chan[0].msch = syscon_get_first_range(ROCKCHIP_SYSCON_MSCH);
	priv->chan[0].pctl = regmap_get_range(plat->map, 0);
	priv->chan[0].phy = regmap_get_range(plat->map, 1);
	ret = rockchip_get_clk(&dev_clk);
	if (ret)
		return ret;
	priv->ddr_clk.id = CLK_DDR;
	ret = clk_request(dev_clk, &priv->ddr_clk);
	if (ret)
		return ret;

	priv->cru = rockchip_get_cru();
	if (IS_ERR(priv->cru))
		return PTR_ERR(priv->cru);
	ret = sdram_init(priv, plat);
	if (ret)
		return ret;
#else
	/* post-TPL: recover the size recorded in GRF os_reg[2] */
	priv->info.base = CONFIG_SYS_SDRAM_BASE;
	priv->info.size = rockchip_sdram_size(
			(phys_addr_t)&priv->grf->os_reg[2]);
#endif

	return 0;
}
823
824static int rk322x_dmc_get_info(struct udevice *dev, struct ram_info *info)
825{
826 struct dram_info *priv = dev_get_priv(dev);
827
828 *info = priv->info;
829
830 return 0;
831}
832
/* RAM uclass operations: only info retrieval is needed after init */
static struct ram_ops rk322x_dmc_ops = {
	.get_info = rk322x_dmc_get_info,
};

static const struct udevice_id rk322x_dmc_ids[] = {
	{ .compatible = "rockchip,rk3228-dmc" },
	{ }
};

U_BOOT_DRIVER(dmc_rk322x) = {
	.name = "rockchip_rk322x_dmc",
	.id = UCLASS_RAM,
	.of_match = rk322x_dmc_ids,
	.ops = &rk322x_dmc_ops,
#ifdef CONFIG_TPL_BUILD
	/* DT parsing and the actual SDRAM init happen only in TPL */
	.of_to_plat = rk322x_dmc_of_to_plat,
#endif
	.probe = rk322x_dmc_probe,
	.priv_auto = sizeof(struct dram_info),
#ifdef CONFIG_TPL_BUILD
	.plat_auto = sizeof(struct rk322x_sdram_params),
#endif
};