/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <debug.h>
#include <arch_helpers.h>
#include <platform_def.h>
#include <plat_private.h>
#include <dram.h>
#include <pmu_regs.h>
#include <rk3399_def.h>
#include <secure.h>
#include <soc.h>
#include <suspend.h>

#define PMUGRF_OS_REG0		0x300
#define PMUGRF_OS_REG1		0x304
#define PMUGRF_OS_REG2		0x308
#define PMUGRF_OS_REG3		0x30c

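/*
 * The CRU soft-reset registers (and the PMUCRU reset-hold registers used
 * further down) follow the usual Rockchip convention: the upper 16 bits form
 * a write-enable mask, so only the low bits whose mask bit is set actually
 * get updated. These macros therefore combine the enable bit (shifted by 16)
 * with the value itself.
 */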
#define CRU_SFTRST_DDR_CTRL(ch, n)	((0x1 << (8 + 16 + (ch) * 4)) | \
					 ((n) << (8 + (ch) * 4)))
#define CRU_SFTRST_DDR_PHY(ch, n)	((0x1 << (9 + 16 + (ch) * 4)) | \
					 ((n) << (9 + (ch) * 4)))

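/*
 * Encode/decode helpers for the DPLL divider fields; dmc_save() uses the
 * _DEC variants to read the current dividers back from the CRU. The DPLL
 * (and hence DDR) rate follows from:
 *
 *	rate = 24 MHz * fbdiv / (refdiv * postdiv1 * postdiv2)
 *
 * e.g. fbdiv = 50, refdiv = 1, postdiv1 = 2, postdiv2 = 1 gives 600 MHz
 * (illustrative values only).
 */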
#define FBDIV_ENC(n)		((n) << 16)
#define FBDIV_DEC(n)		(((n) >> 16) & 0xfff)
#define POSTDIV2_ENC(n)		((n) << 12)
#define POSTDIV2_DEC(n)		(((n) >> 12) & 0x7)
#define POSTDIV1_ENC(n)		((n) << 8)
#define POSTDIV1_DEC(n)		(((n) >> 8) & 0x7)
#define REFDIV_ENC(n)		(n)
#define REFDIV_DEC(n)		((n) & 0x3f)

/* PMU CRU */
#define PMUCRU_RSTNHOLD_CON0	0x120
#define PMUCRU_RSTNHOLD_CON1	0x124

#define PRESET_GPIO0_HOLD(n)	(((n) << 7) | WMSK_BIT(7))
#define PRESET_GPIO1_HOLD(n)	(((n) << 8) | WMSK_BIT(8))

#define SYS_COUNTER_FREQ_IN_MHZ	(SYS_COUNTER_FREQ_IN_TICKS / 1000000)

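/*
 * Note: the routines below that are marked __sramfunc execute from SRAM.
 * They run while the DRAM controllers and PHYs are being reset and
 * reprogrammed during resume, when DRAM contents cannot be used, so neither
 * the code nor the data it touches may live in DRAM.
 */
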
/*
 * Copy @num 32-bit registers from @src to @dst
 */
__sramfunc void sram_regcpy(uintptr_t dst, uintptr_t src, uint32_t num)
{
	while (num--) {
		mmio_write_32(dst, mmio_read_32(src));
		dst += sizeof(uint32_t);
		src += sizeof(uint32_t);
	}
}

static __sramfunc uint32_t sram_get_timer_value(void)
{
	/*
	 * The generic delay timer implementation expects the timer to be a
	 * down counter. We apply a bitwise NOT to the tick value returned by
	 * read_cntpct_el0() to simulate the down counter.
	 */
	return (uint32_t)(~read_cntpct_el0());
}

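/*
 * Busy-wait for at least @usec microseconds by polling the system counter.
 * SYS_COUNTER_FREQ_IN_MHZ is the number of counter ticks per microsecond
 * (e.g. 24 for a 24 MHz counter; the 24 MHz figure is only an illustration).
 */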
static __sramfunc void sram_udelay(uint32_t usec)
{
	uint32_t start, cnt, delta, delta_us;

	/* counter is decreasing */
	start = sram_get_timer_value();
	do {
		cnt = sram_get_timer_value();
		if (cnt > start) {
			delta = UINT32_MAX - cnt;
			delta += start;
		} else {
			delta = start - cnt;
		}
		/* convert elapsed ticks to microseconds */
		delta_us = delta / SYS_COUNTER_FREQ_IN_MHZ;
	} while (delta_us < usec);
}

static __sramfunc void configure_sgrf(void)
{
	/*
	 * SGRF_DDR_RGN_DPLL_CLK and SGRF_DDR_RGN_RTC_CLK:
	 * IC ECO bug, these bits must be set.
	 *
	 * SGRF_DDR_RGN_BYPS:
	 * After the PD_CENTER suspend/resume, the DDR region
	 * related registers in the SGRF are reset, so we
	 * need to re-initialize them.
	 */
	mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
		      SGRF_DDR_RGN_DPLL_CLK |
		      SGRF_DDR_RGN_RTC_CLK |
		      SGRF_DDR_RGN_BYPS);
}

static __sramfunc void rkclk_ddr_reset(uint32_t channel, uint32_t ctl,
				       uint32_t phy)
{
	channel &= 0x1;
	ctl &= 0x1;
	phy &= 0x1;
	mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(4),
		      CRU_SFTRST_DDR_CTRL(channel, ctl) |
		      CRU_SFTRST_DDR_PHY(channel, phy));
}

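/*
 * Pulse the DDR controller and PHY resets for channel @ch: assert both
 * resets, release the PHY reset, then release the controller reset, with
 * roughly 10 us between the steps.
 */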
static __sramfunc void phy_pctrl_reset(uint32_t ch)
{
	rkclk_ddr_reset(ch, 1, 1);
	sram_udelay(10);
	rkclk_ddr_reset(ch, 1, 0);
	sram_udelay(10);
	rkclk_ddr_reset(ch, 0, 0);
	sram_udelay(10);
}

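/*
 * Select the PHY master delay mode for the given DRAM clock: at or below
 * 125 MHz the software ("DLL bypass") master mode bits are set, since the
 * DLLs are not expected to lock at such low frequencies; above that the
 * bits are cleared so the DLLs are used.
 */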
static __sramfunc void phy_dll_bypass_set(uint32_t ch, uint32_t hz)
{
	if (hz <= 125 * MHz) {
		/* phy_sw_master_mode_X PHY_86/214/342/470 4bits offset_8 */
		mmio_setbits_32(PHY_REG(ch, 86), (0x3 << 2) << 8);
		mmio_setbits_32(PHY_REG(ch, 214), (0x3 << 2) << 8);
		mmio_setbits_32(PHY_REG(ch, 342), (0x3 << 2) << 8);
		mmio_setbits_32(PHY_REG(ch, 470), (0x3 << 2) << 8);
		/* phy_adrctl_sw_master_mode PHY_547/675/803 4bits offset_16 */
		mmio_setbits_32(PHY_REG(ch, 547), (0x3 << 2) << 16);
		mmio_setbits_32(PHY_REG(ch, 675), (0x3 << 2) << 16);
		mmio_setbits_32(PHY_REG(ch, 803), (0x3 << 2) << 16);
	} else {
		/* phy_sw_master_mode_X PHY_86/214/342/470 4bits offset_8 */
		mmio_clrbits_32(PHY_REG(ch, 86), (0x3 << 2) << 8);
		mmio_clrbits_32(PHY_REG(ch, 214), (0x3 << 2) << 8);
		mmio_clrbits_32(PHY_REG(ch, 342), (0x3 << 2) << 8);
		mmio_clrbits_32(PHY_REG(ch, 470), (0x3 << 2) << 8);
		/* phy_adrctl_sw_master_mode PHY_547/675/803 4bits offset_16 */
		mmio_clrbits_32(PHY_REG(ch, 547), (0x3 << 2) << 16);
		mmio_clrbits_32(PHY_REG(ch, 675), (0x3 << 2) << 16);
		mmio_clrbits_32(PHY_REG(ch, 803), (0x3 << 2) << 16);
	}
}

static __sramfunc void set_cs_training_index(uint32_t ch, uint32_t rank)
{
	/* PHY_8/136/264/392 phy_per_cs_training_index_X 1bit offset_24 */
	mmio_clrsetbits_32(PHY_REG(ch, 8), 0x1 << 24, rank << 24);
	mmio_clrsetbits_32(PHY_REG(ch, 136), 0x1 << 24, rank << 24);
	mmio_clrsetbits_32(PHY_REG(ch, 264), 0x1 << 24, rank << 24);
	mmio_clrsetbits_32(PHY_REG(ch, 392), 0x1 << 24, rank << 24);
}

static __sramfunc void select_per_cs_training_index(uint32_t ch, uint32_t rank)
{
	/* PHY_84 PHY_PER_CS_TRAINING_EN_0 1bit offset_16 */
	if ((mmio_read_32(PHY_REG(ch, 84)) >> 16) & 1)
		set_cs_training_index(ch, rank);
}

static void override_write_leveling_value(uint32_t ch)
{
	uint32_t byte;

	/* PHY_896 PHY_FREQ_SEL_MULTICAST_EN 1bit offset_0 */
	mmio_setbits_32(PHY_REG(ch, 896), 1);

	/*
	 * PHY_8/136/264/392
	 * phy_per_cs_training_multicast_en_X 1bit offset_16
	 */
	mmio_clrsetbits_32(PHY_REG(ch, 8), 0x1 << 16, 1 << 16);
	mmio_clrsetbits_32(PHY_REG(ch, 136), 0x1 << 16, 1 << 16);
	mmio_clrsetbits_32(PHY_REG(ch, 264), 0x1 << 16, 1 << 16);
	mmio_clrsetbits_32(PHY_REG(ch, 392), 0x1 << 16, 1 << 16);

	for (byte = 0; byte < 4; byte++)
		mmio_clrsetbits_32(PHY_REG(ch, 63 + (128 * byte)),
				   0xffff << 16,
				   0x200 << 16);

	/* PHY_896 PHY_FREQ_SEL_MULTICAST_EN 1bit offset_0 */
	mmio_clrbits_32(PHY_REG(ch, 896), 1);

	/* CTL_200 ctrlupd_req 1bit offset_8 */
	mmio_clrsetbits_32(CTL_REG(ch, 200), 0x1 << 8, 0x1 << 8);
}

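/*
 * Run the PI/PHY training steps selected by @training_flag on channel @ch:
 * CA training, write leveling, read gate training, read leveling and (for
 * LPDDR4) write DQ leveling. Each step programs the corresponding PI request
 * registers and then polls PI_INT_STATUS plus the PHY observation registers
 * for completion or error. Returns 0 on success, -1 on any training error.
 */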
static __sramfunc int data_training(uint32_t ch,
				    struct rk3399_sdram_params *sdram_params,
				    uint32_t training_flag)
{
	uint32_t obs_0, obs_1, obs_2, obs_3, obs_err = 0;
	uint32_t rank = sdram_params->ch[ch].rank;
	uint32_t rank_mask;
	uint32_t i, tmp;

	if (sdram_params->dramtype == LPDDR4)
		rank_mask = (rank == 1) ? 0x5 : 0xf;
	else
		rank_mask = (rank == 1) ? 0x1 : 0x3;

	/* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */
	mmio_setbits_32(PHY_REG(ch, 927), (1 << 22));

	if (training_flag == PI_FULL_TRAINING) {
		if (sdram_params->dramtype == LPDDR4) {
			training_flag = PI_WRITE_LEVELING |
					PI_READ_GATE_TRAINING |
					PI_READ_LEVELING |
					PI_WDQ_LEVELING;
		} else if (sdram_params->dramtype == LPDDR3) {
			training_flag = PI_CA_TRAINING | PI_WRITE_LEVELING |
					PI_READ_GATE_TRAINING;
		} else if (sdram_params->dramtype == DDR3) {
			training_flag = PI_WRITE_LEVELING |
					PI_READ_GATE_TRAINING |
					PI_READ_LEVELING;
		}
	}

	/* CA training (supported by LPDDR4 and LPDDR3) */
	if ((training_flag & PI_CA_TRAINING) == PI_CA_TRAINING) {
		for (i = 0; i < 4; i++) {
			if (!(rank_mask & (1 << i)))
				continue;

			select_per_cs_training_index(ch, i);
			/* PI_100 PI_CALVL_EN:RW:8:2 */
			mmio_clrsetbits_32(PI_REG(ch, 100), 0x3 << 8, 0x2 << 8);

			/* PI_92 PI_CALVL_REQ:WR:16:1,PI_CALVL_CS:RW:24:2 */
			mmio_clrsetbits_32(PI_REG(ch, 92),
					   (0x1 << 16) | (0x3 << 24),
					   (0x1 << 16) | (i << 24));
			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

				/*
				 * check the obs status
				 * PHY_532/660/788 phy_adr_calvl_obs1_:0:32
				 */
				obs_0 = mmio_read_32(PHY_REG(ch, 532));
				obs_1 = mmio_read_32(PHY_REG(ch, 660));
				obs_2 = mmio_read_32(PHY_REG(ch, 788));
				if (((obs_0 >> 30) & 0x3) ||
				    ((obs_1 >> 30) & 0x3) ||
				    ((obs_2 >> 30) & 0x3))
					obs_err = 1;
				if ((((tmp >> 11) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 5) & 0x1) == 0x0) &&
				    (obs_err == 0))
					break;
				else if ((((tmp >> 5) & 0x1) == 0x1) ||
					 (obs_err == 1))
					return -1;
			}
			/* clear interrupt, PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		mmio_clrbits_32(PI_REG(ch, 100), 0x3 << 8);
	}

	/* write leveling (supported by LPDDR4, LPDDR3 and DDR3) */
	if ((training_flag & PI_WRITE_LEVELING) == PI_WRITE_LEVELING) {
		for (i = 0; i < rank; i++) {
			select_per_cs_training_index(ch, i);
			/* PI_60 PI_WRLVL_EN:RW:8:2 */
			mmio_clrsetbits_32(PI_REG(ch, 60), 0x3 << 8, 0x2 << 8);
			/* PI_59 PI_WRLVL_REQ:WR:8:1,PI_WRLVL_CS:RW:16:2 */
			mmio_clrsetbits_32(PI_REG(ch, 59),
					   (0x1 << 8) | (0x3 << 16),
					   (0x1 << 8) | (i << 16));

			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

				/*
				 * check the obs status; on error, leveling
				 * may never complete. PHY_40/168/296/424
				 * phy_wrlvl_status_obs_X:0:13
				 */
				obs_0 = mmio_read_32(PHY_REG(ch, 40));
				obs_1 = mmio_read_32(PHY_REG(ch, 168));
				obs_2 = mmio_read_32(PHY_REG(ch, 296));
				obs_3 = mmio_read_32(PHY_REG(ch, 424));
				if (((obs_0 >> 12) & 0x1) ||
				    ((obs_1 >> 12) & 0x1) ||
				    ((obs_2 >> 12) & 0x1) ||
				    ((obs_3 >> 12) & 0x1))
					obs_err = 1;
				if ((((tmp >> 10) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 4) & 0x1) == 0x0) &&
				    (obs_err == 0))
					break;
				else if ((((tmp >> 4) & 0x1) == 0x1) ||
					 (obs_err == 1))
					return -1;
			}

			/* clear interrupt, PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		override_write_leveling_value(ch);
		mmio_clrbits_32(PI_REG(ch, 60), 0x3 << 8);
	}

	/* read gate training (supported by LPDDR4, LPDDR3 and DDR3) */
	if ((training_flag & PI_READ_GATE_TRAINING) == PI_READ_GATE_TRAINING) {
		for (i = 0; i < rank; i++) {
			select_per_cs_training_index(ch, i);
			/* PI_80 PI_RDLVL_GATE_EN:RW:24:2 */
			mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 24,
					   0x2 << 24);
			/*
			 * PI_74 PI_RDLVL_GATE_REQ:WR:16:1
			 * PI_RDLVL_CS:RW:24:2
			 */
			mmio_clrsetbits_32(PI_REG(ch, 74),
					   (0x1 << 16) | (0x3 << 24),
					   (0x1 << 16) | (i << 24));

			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

				/*
				 * check the obs status
				 * PHY_43/171/299/427
				 * PHY_GTLVL_STATUS_OBS_x:16:8
				 */
				obs_0 = mmio_read_32(PHY_REG(ch, 43));
				obs_1 = mmio_read_32(PHY_REG(ch, 171));
				obs_2 = mmio_read_32(PHY_REG(ch, 299));
				obs_3 = mmio_read_32(PHY_REG(ch, 427));
				if (((obs_0 >> (16 + 6)) & 0x3) ||
				    ((obs_1 >> (16 + 6)) & 0x3) ||
				    ((obs_2 >> (16 + 6)) & 0x3) ||
				    ((obs_3 >> (16 + 6)) & 0x3))
					obs_err = 1;
				if ((((tmp >> 9) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 3) & 0x1) == 0x0) &&
				    (obs_err == 0))
					break;
				else if ((((tmp >> 3) & 0x1) == 0x1) ||
					 (obs_err == 1))
					return -1;
			}
			/* clear interrupt, PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 24);
	}

	/* read leveling (supported by LPDDR4, LPDDR3 and DDR3) */
	if ((training_flag & PI_READ_LEVELING) == PI_READ_LEVELING) {
		for (i = 0; i < rank; i++) {
			select_per_cs_training_index(ch, i);
			/* PI_80 PI_RDLVL_EN:RW:16:2 */
			mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 16,
					   0x2 << 16);
			/* PI_74 PI_RDLVL_REQ:WR:8:1,PI_RDLVL_CS:RW:24:2 */
			mmio_clrsetbits_32(PI_REG(ch, 74),
					   (0x1 << 8) | (0x3 << 24),
					   (0x1 << 8) | (i << 24));
			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

				/*
				 * make sure the obs status does not report an
				 * error bit: PHY_46/174/302/430
				 * phy_rdlvl_status_obs_X:16:8
				 */
				if ((((tmp >> 8) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 2) & 0x1) == 0x0))
					break;
				else if (((tmp >> 2) & 0x1) == 0x1)
					return -1;
			}
			/* clear interrupt, PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 16);
	}

	/* wdq leveling (only LPDDR4 supports it) */
	if ((training_flag & PI_WDQ_LEVELING) == PI_WDQ_LEVELING) {
		for (i = 0; i < 4; i++) {
			if (!(rank_mask & (1 << i)))
				continue;

			select_per_cs_training_index(ch, i);
			/*
			 * disable PI_WDQLVL_VREF_EN before wdq leveling?
			 * PI_181 PI_WDQLVL_VREF_EN:RW:8:1
			 */
			mmio_clrbits_32(PI_REG(ch, 181), 0x1 << 8);
			/* PI_124 PI_WDQLVL_EN:RW:16:2 */
			mmio_clrsetbits_32(PI_REG(ch, 124), 0x3 << 16,
					   0x2 << 16);
			/* PI_121 PI_WDQLVL_REQ:WR:8:1,PI_WDQLVL_CS:RW:16:2 */
			mmio_clrsetbits_32(PI_REG(ch, 121),
					   (0x1 << 8) | (0x3 << 16),
					   (0x1 << 8) | (i << 16));
			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
				if ((((tmp >> 12) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 6) & 0x1) == 0x0))
					break;
				else if (((tmp >> 6) & 0x1) == 0x1)
					return -1;
			}
			/* clear interrupt, PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		mmio_clrbits_32(PI_REG(ch, 124), 0x3 << 16);
	}

	/* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */
	mmio_clrbits_32(PHY_REG(ch, 927), (1 << 22));

	return 0;
}

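/*
 * Program the memory scheduler (MSCH) for @channel: the ddrconfig selector
 * and the per chip-select capacities. cs0_cap, computed as
 * 1 << (cs0_row + col + bk + bw - 20), is the CS0 capacity in megabytes,
 * and the MSCH_DEVICESIZE fields appear to be expressed in 32 MB units,
 * hence the divide by 32.
 */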
static __sramfunc void set_ddrconfig(struct rk3399_sdram_params *sdram_params,
				     unsigned char channel, uint32_t ddrconfig)
{
	/* only need to set ddrconfig */
	struct rk3399_sdram_channel *ch = &sdram_params->ch[channel];
	unsigned int cs0_cap = 0;
	unsigned int cs1_cap = 0;

	cs0_cap = (1 << (ch->cs0_row + ch->col + ch->bk + ch->bw - 20));
	if (ch->rank > 1)
		cs1_cap = cs0_cap >> (ch->cs0_row - ch->cs1_row);
	if (ch->row_3_4) {
		cs0_cap = cs0_cap * 3 / 4;
		cs1_cap = cs1_cap * 3 / 4;
	}

	mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICECONF,
		      ddrconfig | (ddrconfig << 6));
	mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICESIZE,
		      ((cs0_cap / 32) & 0xff) | (((cs1_cap / 32) & 0xff) << 8));
}

static __sramfunc void dram_all_config(struct rk3399_sdram_params *sdram_params)
{
	unsigned int i;

	for (i = 0; i < 2; i++) {
		struct rk3399_sdram_channel *info = &sdram_params->ch[i];
		struct rk3399_msch_timings *noc = &info->noc_timings;

		if (sdram_params->ch[i].col == 0)
			continue;

		mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGA0,
			      noc->ddrtiminga0.d32);
		mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGB0,
			      noc->ddrtimingb0.d32);
		mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGC0,
			      noc->ddrtimingc0.d32);
		mmio_write_32(MSCH_BASE(i) + MSCH_DEVTODEV0,
			      noc->devtodev0.d32);
		mmio_write_32(MSCH_BASE(i) + MSCH_DDRMODE, noc->ddrmode.d32);

		/* rank 1 memory clock disable (dfi_dram_clk_disable = 1) */
		if (sdram_params->ch[i].rank == 1)
			mmio_setbits_32(CTL_REG(i, 276), 1 << 17);
	}

	DDR_STRIDE(sdram_params->stride);

	/* reboot hold register set */
	mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
		      CRU_PMU_SGRF_RST_RLS |
		      PRESET_GPIO0_HOLD(1) |
		      PRESET_GPIO1_HOLD(1));
	mmio_clrsetbits_32(CRU_BASE + CRU_GLB_RST_CON, 0x3, 0x3);
}

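/*
 * Reprogram one channel's DDR controller (CTL), PI and PHY registers from
 * the arrays captured by dmc_save(), start the PI and the controller, wait
 * for the PHY lock/done observation bits (PHY_920..922) unless the PLL is
 * bypassed, and then restore the remaining PHY registers.
 */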
static __sramfunc void pctl_cfg(uint32_t ch,
				struct rk3399_sdram_params *sdram_params)
{
	const uint32_t *params_ctl = sdram_params->pctl_regs.denali_ctl;
	const uint32_t *params_phy = sdram_params->phy_regs.denali_phy;
	const uint32_t *params_pi = sdram_params->pi_regs.denali_pi;
	uint32_t tmp, tmp1, tmp2;

	/*
	 * Workaround for a controller bug:
	 * do not program DRAM_CLASS until NO_PHY_IND_TRAIN_INT is programmed.
	 */
	sram_regcpy(CTL_REG(ch, 1), (uintptr_t)&params_ctl[1],
		    CTL_REG_NUM - 1);
	mmio_write_32(CTL_REG(ch, 0), params_ctl[0]);
	sram_regcpy(PI_REG(ch, 0), (uintptr_t)&params_pi[0],
		    PI_REG_NUM);

	mmio_write_32(PHY_REG(ch, 910), params_phy[910]);
	mmio_write_32(PHY_REG(ch, 911), params_phy[911]);
	mmio_write_32(PHY_REG(ch, 912), params_phy[912]);

	mmio_clrsetbits_32(CTL_REG(ch, 68), PWRUP_SREFRESH_EXIT,
			   PWRUP_SREFRESH_EXIT);

	/* PHY_DLL_RST_EN */
	mmio_clrsetbits_32(PHY_REG(ch, 957), 0x3 << 24, 1 << 24);
	dmbst();

	mmio_setbits_32(PI_REG(ch, 0), START);
	mmio_setbits_32(CTL_REG(ch, 0), START);

	/* wait for lock */
	while (1) {
		tmp = mmio_read_32(PHY_REG(ch, 920));
		tmp1 = mmio_read_32(PHY_REG(ch, 921));
		tmp2 = mmio_read_32(PHY_REG(ch, 922));
		if ((((tmp >> 16) & 0x1) == 0x1) &&
		    (((tmp1 >> 16) & 0x1) == 0x1) &&
		    (((tmp1 >> 0) & 0x1) == 0x1) &&
		    (((tmp2 >> 0) & 0x1) == 0x1))
			break;
		/* if the PLL is bypassed, there is no need to wait for lock */
		if (mmio_read_32(PHY_REG(ch, 911)) & 0x1)
			break;
	}

	sram_regcpy(PHY_REG(ch, 896), (uintptr_t)&params_phy[896], 63);
	sram_regcpy(PHY_REG(ch, 0), (uintptr_t)&params_phy[0], 91);
	sram_regcpy(PHY_REG(ch, 128), (uintptr_t)&params_phy[128], 91);
	sram_regcpy(PHY_REG(ch, 256), (uintptr_t)&params_phy[256], 91);
	sram_regcpy(PHY_REG(ch, 384), (uintptr_t)&params_phy[384], 91);
	sram_regcpy(PHY_REG(ch, 512), (uintptr_t)&params_phy[512], 38);
	sram_regcpy(PHY_REG(ch, 640), (uintptr_t)&params_phy[640], 38);
	sram_regcpy(PHY_REG(ch, 768), (uintptr_t)&params_phy[768], 38);
}

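/*
 * Use the CIC handshake to switch the DRAM controllers to the next
 * register-copy/frequency index, then rerun full data training on every
 * channel. Returns 0 on success, -1 if training fails.
 */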
static __sramfunc int dram_switch_to_next_index(
		struct rk3399_sdram_params *sdram_params)
{
	uint32_t ch, ch_count;
	uint32_t fn = ((mmio_read_32(CTL_REG(0, 111)) >> 16) + 1) & 0x1;

	mmio_write_32(CIC_BASE + CIC_CTRL0,
		      (((0x3 << 4) | (1 << 2) | 1) << 16) |
		      (fn << 4) | (1 << 2) | 1);
	while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 2)))
		;

	mmio_write_32(CIC_BASE + CIC_CTRL0, 0x20002);
	while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 0)))
		;

	ch_count = sdram_params->num_channels;

	/* LPDDR4 f2 cannot do training; all training would fail */
	for (ch = 0; ch < ch_count; ch++) {
		mmio_clrsetbits_32(PHY_REG(ch, 896), (0x3 << 8) | 1,
				   fn << 8);

		/* bail out if data training fails */
		if (data_training(ch, sdram_params, PI_FULL_TRAINING))
			return -1;
	}

	return 0;
}


/*
 * Needs to be done for both channels at once in case of a shared reset signal
 * between channels.
 */
static __sramfunc int pctl_start(uint32_t channel_mask,
				 struct rk3399_sdram_params *sdram_params)
{
	uint32_t count;
	uint32_t byte;

	mmio_setbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT);
	mmio_setbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT);

	/* need to de-access IO retention before controller START */
	if (channel_mask & (1 << 0))
		mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 19));
	if (channel_mask & (1 << 1))
		mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 23));

	/* PHY_DLL_RST_EN */
	if (channel_mask & (1 << 0))
		mmio_clrsetbits_32(PHY_REG(0, 957), 0x3 << 24,
				   0x2 << 24);
	if (channel_mask & (1 << 1))
		mmio_clrsetbits_32(PHY_REG(1, 957), 0x3 << 24,
				   0x2 << 24);

	/* check ERROR bit */
	if (channel_mask & (1 << 0)) {
		count = 0;
		while (!(mmio_read_32(CTL_REG(0, 203)) & (1 << 3))) {
			/* CKE is low; poll for up to 10 ms */
			if (count > 100)
				return -1;

			sram_udelay(100);
			count++;
		}

		mmio_clrbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT);

		/* Restore the PHY_RX_CAL_DQS value */
		for (byte = 0; byte < 4; byte++)
			mmio_clrsetbits_32(PHY_REG(0, 57 + 128 * byte),
					   0xfff << 16,
					   sdram_params->rx_cal_dqs[0][byte]);
	}
	if (channel_mask & (1 << 1)) {
		count = 0;
		while (!(mmio_read_32(CTL_REG(1, 203)) & (1 << 3))) {
			/* CKE is low; poll for up to 10 ms */
			if (count > 100)
				return -1;

			sram_udelay(100);
			count++;
		}

		mmio_clrbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT);

		/* Restore the PHY_RX_CAL_DQS value */
		for (byte = 0; byte < 4; byte++)
			mmio_clrsetbits_32(PHY_REG(1, 57 + 128 * byte),
					   0xfff << 16,
					   sdram_params->rx_cal_dqs[1][byte]);
	}

	return 0;
}

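/*
 * Called before suspend: record the current DDR frequency and ODT setting,
 * and snapshot the CTL/PI/PHY register state of channel 0 (plus the
 * per-channel PHY_RX_CAL_DQS values) into sdram_config so that dmc_restore()
 * can reprogram both controllers from SRAM on resume. The START bits are
 * masked in the saved copies; they are set explicitly during restore.
 */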
void dmc_save(void)
{
	struct rk3399_sdram_params *sdram_params = &sdram_config;
	uint32_t *params_ctl;
	uint32_t *params_pi;
	uint32_t *params_phy;
	uint32_t refdiv, postdiv2, postdiv1, fbdiv;
	uint32_t tmp, ch, byte;

	params_ctl = sdram_params->pctl_regs.denali_ctl;
	params_pi = sdram_params->pi_regs.denali_pi;
	params_phy = sdram_params->phy_regs.denali_phy;

	fbdiv = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 0)) & 0xfff;
	tmp = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 1));
	postdiv2 = POSTDIV2_DEC(tmp);
	postdiv1 = POSTDIV1_DEC(tmp);
	refdiv = REFDIV_DEC(tmp);

	sdram_params->ddr_freq = ((fbdiv * 24) /
				  (refdiv * postdiv1 * postdiv2)) * MHz;

	INFO("sdram_params->ddr_freq = %d\n", sdram_params->ddr_freq);
	sdram_params->odt = (((mmio_read_32(PHY_REG(0, 5)) >> 16) &
			      0x7) != 0) ? 1 : 0;

	/* copy the CTL, PI and PHY registers */
	sram_regcpy((uintptr_t)&params_ctl[0], CTL_REG(0, 0), CTL_REG_NUM);

	/* mask DENALI_CTL_00_DATA.START; only copy it here, trigger it later */
	params_ctl[0] &= ~(0x1 << 0);

	sram_regcpy((uintptr_t)&params_pi[0], PI_REG(0, 0),
		    PI_REG_NUM);

	/* mask DENALI_PI_00_DATA.START; only copy it here, trigger it later */
	params_pi[0] &= ~(0x1 << 0);

	sram_regcpy((uintptr_t)&params_phy[0], PHY_REG(0, 0), 91);
	sram_regcpy((uintptr_t)&params_phy[128], PHY_REG(0, 128), 91);
	sram_regcpy((uintptr_t)&params_phy[256], PHY_REG(0, 256), 91);
	sram_regcpy((uintptr_t)&params_phy[384], PHY_REG(0, 384), 91);
	sram_regcpy((uintptr_t)&params_phy[512], PHY_REG(0, 512), 38);
	sram_regcpy((uintptr_t)&params_phy[640], PHY_REG(0, 640), 38);
	sram_regcpy((uintptr_t)&params_phy[768], PHY_REG(0, 768), 38);
	sram_regcpy((uintptr_t)&params_phy[896], PHY_REG(0, 896), 63);

	for (ch = 0; ch < sdram_params->num_channels; ch++) {
		for (byte = 0; byte < 4; byte++)
			sdram_params->rx_cal_dqs[ch][byte] = (0xfff << 16) &
				mmio_read_32(PHY_REG(ch, 57 + byte * 128));
	}

	/* set DENALI_PHY_957_DATA.PHY_DLL_RST_EN = 0x1 */
	params_phy[957] &= ~(0x3 << 24);
	params_phy[957] |= 1 << 24;
	params_phy[896] |= 1;
	params_phy[896] &= ~(0x3 << 8);
}

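/*
 * Called from SRAM on resume: re-initialize the SGRF DDR region, reset and
 * reprogram both DDR controllers/PHYs from the state captured by dmc_save(),
 * rerun data training and restore the MSCH/NoC configuration. If the
 * controller fails to start or training fails, the whole sequence is retried.
 */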
__sramfunc void dmc_restore(void)
{
	struct rk3399_sdram_params *sdram_params = &sdram_config;
	uint32_t channel_mask = 0;
	uint32_t channel;

	configure_sgrf();

retry:
	for (channel = 0; channel < sdram_params->num_channels; channel++) {
		phy_pctrl_reset(channel);
		phy_dll_bypass_set(channel, sdram_params->ddr_freq);
		if (channel >= sdram_params->num_channels)
			continue;

		pctl_cfg(channel, sdram_params);
	}

	for (channel = 0; channel < 2; channel++) {
		if (sdram_params->ch[channel].col)
			channel_mask |= 1 << channel;
	}

	if (pctl_start(channel_mask, sdram_params) < 0)
		goto retry;

	for (channel = 0; channel < sdram_params->num_channels; channel++) {
		/* LPDDR2/LPDDR3 need to wait for DAI to complete, 10 us max */
		if (sdram_params->dramtype == LPDDR3)
			sram_udelay(10);

		/* If training fails, retry the whole sequence. */
		if (data_training(channel, sdram_params, PI_FULL_TRAINING))
			goto retry;

		set_ddrconfig(sdram_params, channel,
			      sdram_params->ch[channel].ddrconfig);
	}

	dram_all_config(sdram_params);

	/* Switch to the next index and prepare for the DDR frequency switch. */
	dram_switch_to_next_index(sdram_params);
}
Caesar Wangf33eb2c2016-10-27 01:13:16 +0800779}