 1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 2/*
 3 * Copyright (C) 2020-2022 Intel Corporation <www.intel.com>
 4 *
5 */
6
7#include <common.h>
8#include <clk.h>
9#include <div64.h>
10#include <dm.h>
11#include <errno.h>
12#include <fdtdec.h>
13#include <hang.h>
14#include <ram.h>
15#include <reset.h>
16#include "sdram_soc64.h"
17#include <wait_bit.h>
18#include <asm/arch/firewall.h>
19#include <asm/arch/handoff_soc64.h>
20#include <asm/arch/misc.h>
21#include <asm/arch/reset_manager.h>
22#include <asm/arch/system_manager.h>
23#include <asm/io.h>
24#include <linux/err.h>
25#include <linux/sizes.h>
26
27DECLARE_GLOBAL_DATA_PTR;
28
29/* MPFE NOC registers */
30#define FPGA2SDRAM_MGR_MAIN_SIDEBANDMGR_FLAGOUTSET0 0xF8024050
31
32/* Memory reset manager */
33#define MEM_RST_MGR_STATUS 0x8
34
35/* Register and bit in memory reset manager */
36#define MEM_RST_MGR_STATUS_RESET_COMPLETE BIT(0)
37#define MEM_RST_MGR_STATUS_PWROKIN_STATUS BIT(1)
38#define MEM_RST_MGR_STATUS_CONTROLLER_RST BIT(2)
39#define MEM_RST_MGR_STATUS_AXI_RST BIT(3)
40
41#define TIMEOUT_200MS 200
42#define TIMEOUT_5000MS 5000
43
44/* DDR4 umctl2 */
45#define DDR4_MSTR_OFFSET 0x0
46#define DDR4_FREQ_RATIO BIT(22)
47
48#define DDR4_STAT_OFFSET 0x4
49#define DDR4_STAT_SELFREF_TYPE GENMASK(5, 4)
50#define DDR4_STAT_SELFREF_TYPE_SHIFT 4
51#define DDR4_STAT_OPERATING_MODE GENMASK(2, 0)
52
53#define DDR4_MRCTRL0_OFFSET 0x10
54#define DDR4_MRCTRL0_MR_TYPE BIT(0)
55#define DDR4_MRCTRL0_MPR_EN BIT(1)
56#define DDR4_MRCTRL0_MR_RANK GENMASK(5, 4)
57#define DDR4_MRCTRL0_MR_RANK_SHIFT 4
58#define DDR4_MRCTRL0_MR_ADDR GENMASK(15, 12)
59#define DDR4_MRCTRL0_MR_ADDR_SHIFT 12
60#define DDR4_MRCTRL0_MR_WR BIT(31)
61
62#define DDR4_MRCTRL1_OFFSET 0x14
63#define DDR4_MRCTRL1_MR_DATA 0x3FFFF
64
65#define DDR4_MRSTAT_OFFSET 0x18
66#define DDR4_MRSTAT_MR_WR_BUSY BIT(0)
67
68#define DDR4_MRCTRL2_OFFSET 0x1C
69
70#define DDR4_PWRCTL_OFFSET 0x30
71#define DDR4_PWRCTL_SELFREF_EN BIT(0)
72#define DDR4_PWRCTL_POWERDOWN_EN BIT(1)
73#define DDR4_PWRCTL_EN_DFI_DRAM_CLK_DISABLE BIT(3)
74#define DDR4_PWRCTL_SELFREF_SW BIT(5)
75
76#define DDR4_PWRTMG_OFFSET 0x34
77#define DDR4_HWLPCTL_OFFSET 0x38
78#define DDR4_RFSHCTL0_OFFSET 0x50
79#define DDR4_RFSHCTL1_OFFSET 0x54
80
81#define DDR4_RFSHCTL3_OFFSET 0x60
82#define DDR4_RFSHCTL3_DIS_AUTO_REFRESH BIT(0)
83#define DDR4_RFSHCTL3_REFRESH_MODE GENMASK(6, 4)
84#define DDR4_RFSHCTL3_REFRESH_MODE_SHIFT 4
85
86#define DDR4_ECCCFG0_OFFSET 0x70
87#define DDR4_ECC_MODE GENMASK(2, 0)
88#define DDR4_DIS_SCRUB BIT(4)
89#define LPDDR4_ECCCFG0_ECC_REGION_MAP_GRANU_SHIFT 30
90#define LPDDR4_ECCCFG0_ECC_REGION_MAP_SHIFT 8
91
92#define DDR4_ECCCFG1_OFFSET 0x74
93#define LPDDR4_ECCCFG1_ECC_REGIONS_PARITY_LOCK BIT(4)
94
95#define DDR4_CRCPARCTL0_OFFSET 0xC0
96#define DDR4_CRCPARCTL0_DFI_ALERT_ERR_INIT_CLR BIT(1)
97
98#define DDR4_CRCPARCTL1_OFFSET 0xC4
99#define DDR4_CRCPARCTL1_CRC_PARITY_RETRY_ENABLE BIT(8)
100#define DDR4_CRCPARCTL1_ALERT_WAIT_FOR_SW BIT(9)
101
102#define DDR4_CRCPARSTAT_OFFSET 0xCC
103#define DDR4_CRCPARSTAT_DFI_ALERT_ERR_INT BIT(16)
104#define DDR4_CRCPARSTAT_DFI_ALERT_ERR_FATL_INT BIT(17)
105#define DDR4_CRCPARSTAT_DFI_ALERT_ERR_NO_SW BIT(19)
106#define DDR4_CRCPARSTAT_CMD_IN_ERR_WINDOW BIT(29)
107
108#define DDR4_INIT0_OFFSET 0xD0
109#define DDR4_INIT0_SKIP_RAM_INIT GENMASK(31, 30)
110
111#define DDR4_RANKCTL_OFFSET 0xF4
112#define DDR4_RANKCTL_DIFF_RANK_RD_GAP GENMASK(7, 4)
113#define DDR4_RANKCTL_DIFF_RANK_WR_GAP GENMASK(11, 8)
114#define DDR4_RANKCTL_DIFF_RANK_RD_GAP_MSB BIT(24)
115#define DDR4_RANKCTL_DIFF_RANK_WR_GAP_MSB BIT(26)
116#define DDR4_RANKCTL_DIFF_RANK_RD_GAP_SHIFT 4
117#define DDR4_RANKCTL_DIFF_RANK_WR_GAP_SHIFT 8
118#define DDR4_RANKCTL_DIFF_RANK_RD_GAP_MSB_SHIFT 24
119#define DDR4_RANKCTL_DIFF_RANK_WR_GAP_MSB_SHIFT 26
120
121#define DDR4_RANKCTL1_OFFSET 0xF8
122#define DDR4_RANKCTL1_WR2RD_DR GENMASK(5, 0)
123
124#define DDR4_DRAMTMG2_OFFSET 0x108
125#define DDR4_DRAMTMG2_WR2RD GENMASK(5, 0)
126#define DDR4_DRAMTMG2_RD2WR GENMASK(13, 8)
127#define DDR4_DRAMTMG2_RD2WR_SHIFT 8
128
129#define DDR4_DRAMTMG9_OFFSET 0x124
130#define DDR4_DRAMTMG9_W2RD_S GENMASK(5, 0)
131
132#define DDR4_DFITMG1_OFFSET 0x194
133#define DDR4_DFITMG1_DFI_T_WRDATA_DELAY GENMASK(20, 16)
134#define DDR4_DFITMG1_DFI_T_WRDATA_SHIFT 16
135
136#define DDR4_DFIMISC_OFFSET 0x1B0
137#define DDR4_DFIMISC_DFI_INIT_COMPLETE_EN BIT(0)
138#define DDR4_DFIMISC_DFI_INIT_START BIT(5)
139
140#define DDR4_DFISTAT_OFFSET 0x1BC
141#define DDR4_DFI_INIT_COMPLETE BIT(0)
142
143#define DDR4_DBG0_OFFSET 0x300
144
145#define DDR4_DBG1_OFFSET 0x304
146#define DDR4_DBG1_DISDQ BIT(0)
147#define DDR4_DBG1_DIS_HIF BIT(1)
148
149#define DDR4_DBGCAM_OFFSET 0x308
150#define DDR4_DBGCAM_DBG_RD_Q_EMPTY BIT(25)
151#define DDR4_DBGCAM_DBG_WR_Q_EMPTY BIT(26)
152#define DDR4_DBGCAM_RD_DATA_PIPELINE_EMPTY BIT(28)
153#define DDR4_DBGCAM_WR_DATA_PIPELINE_EMPTY BIT(29)
154
155#define DDR4_SWCTL_OFFSET 0x320
156#define DDR4_SWCTL_SW_DONE BIT(0)
157
158#define DDR4_SWSTAT_OFFSET 0x324
159#define DDR4_SWSTAT_SW_DONE_ACK BIT(0)
160
161#define DDR4_PSTAT_OFFSET 0x3FC
162#define DDR4_PSTAT_RD_PORT_BUSY_0 BIT(0)
163#define DDR4_PSTAT_WR_PORT_BUSY_0 BIT(16)
164
165#define DDR4_PCTRL0_OFFSET 0x490
166#define DDR4_PCTRL0_PORT_EN BIT(0)
167
168#define DDR4_SBRCTL_OFFSET 0xF24
169#define DDR4_SBRCTL_SCRUB_INTERVAL 0x1FFF00
170#define DDR4_SBRCTL_SCRUB_EN BIT(0)
171#define DDR4_SBRCTL_SCRUB_WRITE BIT(2)
172#define DDR4_SBRCTL_SCRUB_BURST_1 BIT(4)
173
174#define DDR4_SBRSTAT_OFFSET 0xF28
175#define DDR4_SBRSTAT_SCRUB_BUSY BIT(0)
176#define DDR4_SBRSTAT_SCRUB_DONE BIT(1)
177
178#define DDR4_SBRWDATA0_OFFSET 0xF2C
179#define DDR4_SBRWDATA1_OFFSET 0xF30
180#define DDR4_SBRSTART0_OFFSET 0xF38
181#define DDR4_SBRSTART1_OFFSET 0xF3C
182#define DDR4_SBRRANGE0_OFFSET 0xF40
183#define DDR4_SBRRANGE1_OFFSET 0xF44
184
185/* DDR PHY */
186#define DDR_PHY_TXODTDRVSTREN_B0_P0 0x2009A
187#define DDR_PHY_RXPBDLYTG0_R0 0x200D0
188#define DDR_PHY_DBYTE0_TXDQDLYTG0_U0_P0 0x201A0
189
190#define DDR_PHY_DBYTE0_TXDQDLYTG0_U1_P0 0x203A0
191#define DDR_PHY_DBYTE1_TXDQDLYTG0_U0_P0 0x221A0
192#define DDR_PHY_DBYTE1_TXDQDLYTG0_U1_P0 0x223A0
193#define DDR_PHY_TXDQDLYTG0_COARSE_DELAY GENMASK(9, 6)
194#define DDR_PHY_TXDQDLYTG0_COARSE_DELAY_SHIFT 6
195
196#define DDR_PHY_CALRATE_OFFSET 0x40110
197#define DDR_PHY_CALZAP_OFFSET 0x40112
198#define DDR_PHY_SEQ0BDLY0_P0_OFFSET 0x40016
199#define DDR_PHY_SEQ0BDLY1_P0_OFFSET 0x40018
200#define DDR_PHY_SEQ0BDLY2_P0_OFFSET 0x4001A
201#define DDR_PHY_SEQ0BDLY3_P0_OFFSET 0x4001C
202
203#define DDR_PHY_MEMRESETL_OFFSET 0x400C0
204#define DDR_PHY_MEMRESETL_VALUE BIT(0)
205#define DDR_PHY_PROTECT_MEMRESET BIT(1)
206
207#define DDR_PHY_CALBUSY_OFFSET 0x4012E
208#define DDR_PHY_CALBUSY BIT(0)
209
210#define DDR_PHY_TRAIN_IMEM_OFFSET 0xA0000
211#define DDR_PHY_TRAIN_DMEM_OFFSET 0xA8000
212
213#define DMEM_MB_CDD_RR_1_0_OFFSET 0xA802C
214#define DMEM_MB_CDD_RR_0_1_OFFSET 0xA8030
215#define DMEM_MB_CDD_WW_1_0_OFFSET 0xA8038
216#define DMEM_MB_CDD_WW_0_1_OFFSET 0xA803C
217#define DMEM_MB_CDD_RW_1_1_OFFSET 0xA8046
218#define DMEM_MB_CDD_RW_1_0_OFFSET 0xA8048
219#define DMEM_MB_CDD_RW_0_1_OFFSET 0xA804A
220#define DMEM_MB_CDD_RW_0_0_OFFSET 0xA804C
221
222#define DMEM_MB_CDD_CHA_RR_1_0_OFFSET 0xA8026
223#define DMEM_MB_CDD_CHA_RR_0_1_OFFSET 0xA8026
224#define DMEM_MB_CDD_CHB_RR_1_0_OFFSET 0xA8058
225#define DMEM_MB_CDD_CHB_RR_0_1_OFFSET 0xA805A
226#define DMEM_MB_CDD_CHA_WW_1_0_OFFSET 0xA8030
227#define DMEM_MB_CDD_CHA_WW_0_1_OFFSET 0xA8030
228#define DMEM_MB_CDD_CHB_WW_1_0_OFFSET 0xA8062
229#define DMEM_MB_CDD_CHB_WW_0_1_OFFSET 0xA8064
230
231#define DMEM_MB_CDD_CHA_RW_1_1_OFFSET 0xA8028
232#define DMEM_MB_CDD_CHA_RW_1_0_OFFSET 0xA8028
233#define DMEM_MB_CDD_CHA_RW_0_1_OFFSET 0xA802A
234#define DMEM_MB_CDD_CHA_RW_0_0_OFFSET 0xA802A
235
236#define DMEM_MB_CDD_CHB_RW_1_1_OFFSET 0xA805A
237#define DMEM_MB_CDD_CHB_RW_1_0_OFFSET 0xA805C
 238#define DMEM_MB_CDD_CHB_RW_0_1_OFFSET 0xA805C
239#define DMEM_MB_CDD_CHB_RW_0_0_OFFSET 0xA805E
240
241#define DDR_PHY_SEQ0DISABLEFLAG0_OFFSET 0x120018
242#define DDR_PHY_SEQ0DISABLEFLAG1_OFFSET 0x12001A
243#define DDR_PHY_SEQ0DISABLEFLAG2_OFFSET 0x12001C
244#define DDR_PHY_SEQ0DISABLEFLAG3_OFFSET 0x12001E
245#define DDR_PHY_SEQ0DISABLEFLAG4_OFFSET 0x120020
246#define DDR_PHY_SEQ0DISABLEFLAG5_OFFSET 0x120022
247#define DDR_PHY_SEQ0DISABLEFLAG6_OFFSET 0x120024
248#define DDR_PHY_SEQ0DISABLEFLAG7_OFFSET 0x120026
249
250#define DDR_PHY_UCCLKHCLKENABLES_OFFSET 0x180100
251#define DDR_PHY_UCCLKHCLKENABLES_UCCLKEN BIT(0)
252#define DDR_PHY_UCCLKHCLKENABLES_HCLKEN BIT(1)
253
254#define DDR_PHY_UCTWRITEPROT_OFFSET 0x180066
255#define DDR_PHY_UCTWRITEPROT BIT(0)
256
257#define DDR_PHY_APBONLY0_OFFSET 0x1A0000
258#define DDR_PHY_MICROCONTMUXSEL BIT(0)
259
260#define DDR_PHY_UCTSHADOWREGS_OFFSET 0x1A0008
261#define DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW BIT(0)
262
263#define DDR_PHY_DCTWRITEPROT_OFFSET 0x1A0062
264#define DDR_PHY_DCTWRITEPROT BIT(0)
265
266#define DDR_PHY_UCTWRITEONLYSHADOW_OFFSET 0x1A0064
267#define DDR_PHY_UCTDATWRITEONLYSHADOW_OFFSET 0x1A0068
268
269#define DDR_PHY_MICRORESET_OFFSET 0x1A0132
270#define DDR_PHY_MICRORESET_STALL BIT(0)
271#define DDR_PHY_MICRORESET_RESET BIT(3)
272
273#define DDR_PHY_TXODTDRVSTREN_B0_P1 0x22009A
274
275/* For firmware training */
276#define HW_DBG_TRACE_CONTROL_OFFSET 0x18
277#define FW_TRAINING_COMPLETED_STAT 0x07
278#define FW_TRAINING_FAILED_STAT 0xFF
279#define FW_COMPLETION_MSG_ONLY_MODE 0xFF
280#define FW_STREAMING_MSG_ID 0x08
281#define GET_LOWHW_DATA(x) ((x) & 0xFFFF)
282#define GET_LOWB_DATA(x) ((x) & 0xFF)
283#define GET_HIGHB_DATA(x) (((x) & 0xFF00) >> 8)
284
285/* Operating mode */
286#define OPM_INIT 0x000
287#define OPM_NORMAL 0x001
288#define OPM_PWR_D0WN 0x010
289#define OPM_SELF_SELFREF 0x011
290#define OPM_DDR4_DEEP_PWR_DOWN 0x100
291
292/* Refresh mode */
293#define FIXED_1X 0
294#define FIXED_2X BIT(0)
295#define FIXED_4X BIT(4)
296
297/* Address of mode register */
298#define MR0 0x0000
299#define MR1 0x0001
300#define MR2 0x0010
301#define MR3 0x0011
302#define MR4 0x0100
303#define MR5 0x0101
304#define MR6 0x0110
305#define MR7 0x0111
306
307/* MR rank */
308#define RANK0 0x1
309#define RANK1 0x2
310#define ALL_RANK 0x3
311
312#define MR5_BIT4 BIT(4)
313
314/* Value for ecc_region_map */
315#define ALL_PROTECTED 0x7F
316
317/* Region size for ECCCFG0.ecc_region_map */
318enum region_size {
319 ONE_EIGHT,
320 ONE_SIXTEENTH,
321 ONE_THIRTY_SECOND,
322 ONE_SIXTY_FOURTH
323};
324
325enum ddr_type {
326 DDRTYPE_LPDDR4_0,
327 DDRTYPE_LPDDR4_1,
328 DDRTYPE_DDR4,
329 DDRTYPE_UNKNOWN
330};
331
332/* Reset type */
333enum reset_type {
334 POR_RESET,
335 WARM_RESET,
336 COLD_RESET
337};
338
339/* DDR handoff structure */
340struct ddr_handoff {
341 /* Memory reset manager base */
342 phys_addr_t mem_reset_base;
343
344 /* First controller attributes */
345 phys_addr_t cntlr_handoff_base;
346 phys_addr_t cntlr_base;
347 size_t cntlr_total_length;
348 enum ddr_type cntlr_t;
349 size_t cntlr_handoff_length;
350
351 /* Second controller attributes*/
352 phys_addr_t cntlr2_handoff_base;
353 phys_addr_t cntlr2_base;
354 size_t cntlr2_total_length;
355 enum ddr_type cntlr2_t;
356 size_t cntlr2_handoff_length;
357
358 /* PHY attributes */
359 phys_addr_t phy_handoff_base;
360 phys_addr_t phy_base;
361 size_t phy_total_length;
362 size_t phy_handoff_length;
363
364 /* PHY engine attributes */
365 phys_addr_t phy_engine_handoff_base;
366 size_t phy_engine_total_length;
367 size_t phy_engine_handoff_length;
368
369 /* Calibration attributes */
370 phys_addr_t train_imem_base;
371 phys_addr_t train_dmem_base;
372 size_t train_imem_length;
373 size_t train_dmem_length;
374};
375
376/* Message mode */
377enum message_mode {
378 MAJOR_MESSAGE,
379 STREAMING_MESSAGE
380};
381
382static int clr_ca_parity_error_status(phys_addr_t umctl2_base)
383{
384 int ret;
385
386 debug("%s: Clear C/A parity error status in MR5[4]\n", __func__);
387
 388 /* Select MRS operation (clear MPR enable) */
389 clrbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, DDR4_MRCTRL0_MPR_EN);
390
391 /* Set mode register to write operation */
392 setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, DDR4_MRCTRL0_MR_TYPE);
393
 394 /* Set the mode register address to 0x101 (MR5) */
395 setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET,
396 (MR5 << DDR4_MRCTRL0_MR_ADDR_SHIFT) &
397 DDR4_MRCTRL0_MR_ADDR);
398
399 /* Set MR rank to rank 1 */
400 setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET,
401 (RANK1 << DDR4_MRCTRL0_MR_RANK_SHIFT) &
402 DDR4_MRCTRL0_MR_RANK);
403
404 /* Clear C/A parity error status in MR5[4] */
405 clrbits_le32(umctl2_base + DDR4_MRCTRL1_OFFSET, MR5_BIT4);
406
407 /* Trigger mode register read or write operation */
408 setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, DDR4_MRCTRL0_MR_WR);
409
 410 /* Wait until there is no outstanding MR transaction */
411 ret = wait_for_bit_le32((const void *)(umctl2_base +
412 DDR4_MRSTAT_OFFSET), DDR4_MRSTAT_MR_WR_BUSY,
413 false, TIMEOUT_200MS, false);
414 if (ret) {
415 debug("%s: Timeout while waiting for", __func__);
416 debug(" no outstanding MR transaction\n");
417 return ret;
418 }
419
420 return 0;
421}
422
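/*
 * DDR4 retry software sequence: clear the DFI alert error interrupt
 * (before or after the MR access, depending on whether software MR access
 * is currently blocked), wait for any outstanding MR transaction, and
 * clear the C/A parity error status in MR5[4].
 */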
423static int ddr_retry_software_sequence(phys_addr_t umctl2_base)
424{
425 u32 value;
426 int ret;
427
 428 /* Check whether software can perform MRS/MPR/PDA */
429 value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
430 DDR4_CRCPARSTAT_DFI_ALERT_ERR_NO_SW;
431
432 if (value) {
433 /* Clear interrupt bit for DFI alert error */
434 setbits_le32(umctl2_base + DDR4_CRCPARCTL0_OFFSET,
435 DDR4_CRCPARCTL0_DFI_ALERT_ERR_INIT_CLR);
436 }
437
438 debug("%s: Software can perform MRS/MPR/PDA\n", __func__);
439
440 ret = wait_for_bit_le32((const void *)(umctl2_base +
441 DDR4_MRSTAT_OFFSET),
442 DDR4_MRSTAT_MR_WR_BUSY,
443 false, TIMEOUT_200MS, false);
444 if (ret) {
445 debug("%s: Timeout while waiting for", __func__);
446 debug(" no outstanding MR transaction\n");
447 return ret;
448 }
449
450 ret = clr_ca_parity_error_status(umctl2_base);
451 if (ret)
452 return ret;
453
454 if (!value) {
455 /* Clear interrupt bit for DFI alert error */
456 setbits_le32(umctl2_base + DDR4_CRCPARCTL0_OFFSET,
457 DDR4_CRCPARCTL0_DFI_ALERT_ERR_INIT_CLR);
458 }
459
460 return 0;
461}
462
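/*
 * Wait (up to 200 ms) for the CRC/parity command error window to empty.
 * While waiting, run the software retry sequence when a DFI alert error
 * interrupt is seen, and hang on a fatal parity error interrupt.
 */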
463static int ensure_retry_procedure_complete(phys_addr_t umctl2_base)
464{
465 u32 value;
466 u32 start = get_timer(0);
467 int ret;
468
 469 /* Check whether the parity/CRC error window has emptied */
470 value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
471 DDR4_CRCPARSTAT_CMD_IN_ERR_WINDOW;
472
473 /* Polling until parity/crc/error window is emptied */
474 while (value) {
475 if (get_timer(start) > TIMEOUT_200MS) {
476 debug("%s: Timeout while waiting for",
477 __func__);
478 debug(" parity/crc/error window empty\n");
479 return -ETIMEDOUT;
480 }
481
 482 /* Check whether software intervention is enabled */
483 value = readl(umctl2_base + DDR4_CRCPARCTL1_OFFSET) &
484 DDR4_CRCPARCTL1_ALERT_WAIT_FOR_SW;
485 if (value) {
486 debug("%s: Software intervention is enabled\n",
487 __func__);
488
 489 /* Check whether the DFI alert error interrupt is set */
490 value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
491 DDR4_CRCPARSTAT_DFI_ALERT_ERR_INT;
492
493 if (value) {
494 ret = ddr_retry_software_sequence(umctl2_base);
495 debug("%s: DFI alert error interrupt ",
496 __func__);
497 debug("is set\n");
498
499 if (ret)
500 return ret;
501 }
502
 503 /*
 504 * Check whether the fatal parity error interrupt is set
 505 */
506 value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
507 DDR4_CRCPARSTAT_DFI_ALERT_ERR_FATL_INT;
508 if (value) {
509 printf("%s: Fatal parity error ",
510 __func__);
511 printf("interrupt is set, Hang it!!\n");
512 hang();
513 }
514 }
515
516 value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
517 DDR4_CRCPARSTAT_CMD_IN_ERR_WINDOW;
518
519 udelay(1);
520 WATCHDOG_RESET();
521 }
522
523 return 0;
524}
525
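/*
 * Quiesce the controller so quasi-dynamic group 3 registers can be
 * programmed: stop AXI port traffic, wait for the port and the CAM/data
 * pipelines to drain, and let any pending DDR4 CRC/parity retry finish.
 * DBG1 is restored to its original value before returning.
 */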
526static int enable_quasi_dynamic_reg_grp3(phys_addr_t umctl2_base,
527 enum ddr_type umctl2_type)
528{
529 u32 i, value, backup;
530 int ret = 0;
531
532 /* Disable input traffic per port */
533 clrbits_le32(umctl2_base + DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN);
534
535 /* Polling AXI port until idle */
536 ret = wait_for_bit_le32((const void *)(umctl2_base +
537 DDR4_PSTAT_OFFSET),
538 DDR4_PSTAT_WR_PORT_BUSY_0 |
539 DDR4_PSTAT_RD_PORT_BUSY_0, false,
540 TIMEOUT_200MS, false);
541 if (ret) {
542 debug("%s: Timeout while waiting for", __func__);
543 debug(" controller idle\n");
544 return ret;
545 }
546
547 /* Backup user setting */
548 backup = readl(umctl2_base + DDR4_DBG1_OFFSET);
549
550 /* Disable input traffic to the controller */
551 setbits_le32(umctl2_base + DDR4_DBG1_OFFSET, DDR4_DBG1_DIS_HIF);
552
 553 /*
 554 * Ensure the CAM/data pipelines are empty.
 555 * Poll the pipeline-empty flags twice in a row,
 556 * with a 200 ms timeout on each poll.
 557 */
558 for (i = 0; i < 2; i++) {
559 ret = wait_for_bit_le32((const void *)(umctl2_base +
560 DDR4_DBGCAM_OFFSET),
561 DDR4_DBGCAM_WR_DATA_PIPELINE_EMPTY |
562 DDR4_DBGCAM_RD_DATA_PIPELINE_EMPTY |
563 DDR4_DBGCAM_DBG_WR_Q_EMPTY |
564 DDR4_DBGCAM_DBG_RD_Q_EMPTY, true,
565 TIMEOUT_200MS, false);
566 if (ret) {
567 debug("%s: loop(%u): Timeout while waiting for",
568 __func__, i + 1);
569 debug(" CAM/data pipelines are empty\n");
570
571 goto out;
572 }
573 }
574
575 if (umctl2_type == DDRTYPE_DDR4) {
 576 /* Check whether DDR4 retry is enabled */
577 value = readl(umctl2_base + DDR4_CRCPARCTL1_OFFSET) &
578 DDR4_CRCPARCTL1_CRC_PARITY_RETRY_ENABLE;
579
580 if (value) {
581 debug("%s: DDR4 retry is enabled\n", __func__);
582
583 ret = ensure_retry_procedure_complete(umctl2_base);
584 if (ret) {
585 debug("%s: Timeout while waiting for",
586 __func__);
587 debug(" retry procedure complete\n");
588
589 goto out;
590 }
591 }
592 }
593
594 debug("%s: Quasi-dynamic group 3 registers are enabled\n", __func__);
595
596out:
597 /* Restore user setting */
598 writel(backup, umctl2_base + DDR4_DBG1_OFFSET);
599
600 return ret;
601}
602
603static enum ddr_type get_ddr_type(phys_addr_t ddr_type_location)
604{
605 u32 ddr_type_magic = readl(ddr_type_location);
606
607 if (ddr_type_magic == SOC64_HANDOFF_DDR_UMCTL2_DDR4_TYPE)
608 return DDRTYPE_DDR4;
609
610 if (ddr_type_magic == SOC64_HANDOFF_DDR_UMCTL2_LPDDR4_0_TYPE)
611 return DDRTYPE_LPDDR4_0;
612
613 if (ddr_type_magic == SOC64_HANDOFF_DDR_UMCTL2_LPDDR4_1_TYPE)
614 return DDRTYPE_LPDDR4_1;
615
616 return DDRTYPE_UNKNOWN;
617}
618
619static void use_lpddr4_interleaving(bool set)
620{
621 if (set) {
622 printf("Starting LPDDR4 interleaving configuration ...\n");
623 setbits_le32(FPGA2SDRAM_MGR_MAIN_SIDEBANDMGR_FLAGOUTSET0,
624 BIT(5));
625 } else {
626 printf("Starting LPDDR4 non-interleaving configuration ...\n");
627 clrbits_le32(FPGA2SDRAM_MGR_MAIN_SIDEBANDMGR_FLAGOUTSET0,
628 BIT(5));
629 }
630}
631
632static void use_ddr4(enum ddr_type type)
633{
634 if (type == DDRTYPE_DDR4) {
635 printf("Starting DDR4 configuration ...\n");
636 setbits_le32(socfpga_get_sysmgr_addr() + SYSMGR_SOC64_DDR_MODE,
637 SYSMGR_SOC64_DDR_MODE_MSK);
638 } else if (type == DDRTYPE_LPDDR4_0) {
639 printf("Starting LPDDR4 configuration ...\n");
640 clrbits_le32(socfpga_get_sysmgr_addr() + SYSMGR_SOC64_DDR_MODE,
641 SYSMGR_SOC64_DDR_MODE_MSK);
642
643 use_lpddr4_interleaving(false);
644 }
645}
646
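/*
 * Use the hardware scrubber (SBR) to write zeros over the whole DDR so
 * ECC starts from a known state: back up the scrubber/ECC registers, run
 * one full write pass, restore the user settings, then leave background
 * scrubbing enabled if the scrubber is not configured for writes.
 */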
647static int scrubber_ddr_config(phys_addr_t umctl2_base,
648 enum ddr_type umctl2_type)
649{
650 u32 backup[9];
651 int ret;
652
 653 /* Reset to default value to prevent the scrubber stopping due to low power */
654 writel(0, umctl2_base + DDR4_PWRCTL_OFFSET);
655
656 /* Backup user settings */
657 backup[0] = readl(umctl2_base + DDR4_SBRCTL_OFFSET);
658 backup[1] = readl(umctl2_base + DDR4_SBRWDATA0_OFFSET);
659 backup[2] = readl(umctl2_base + DDR4_SBRSTART0_OFFSET);
660 if (umctl2_type == DDRTYPE_DDR4) {
661 backup[3] = readl(umctl2_base + DDR4_SBRWDATA1_OFFSET);
662 backup[4] = readl(umctl2_base + DDR4_SBRSTART1_OFFSET);
663 }
664 backup[5] = readl(umctl2_base + DDR4_SBRRANGE0_OFFSET);
665 backup[6] = readl(umctl2_base + DDR4_SBRRANGE1_OFFSET);
666 backup[7] = readl(umctl2_base + DDR4_ECCCFG0_OFFSET);
667 backup[8] = readl(umctl2_base + DDR4_ECCCFG1_OFFSET);
668
669 if (umctl2_type != DDRTYPE_DDR4) {
 670 /* Lock the ECC region to ensure it is not being accessed */
671 setbits_le32(umctl2_base + DDR4_ECCCFG1_OFFSET,
672 LPDDR4_ECCCFG1_ECC_REGIONS_PARITY_LOCK);
673 }
674 /* Disable input traffic per port */
675 clrbits_le32(umctl2_base + DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN);
 676 /* Disable the scrubber */
 677 clrbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
 678 /* Poll until all scrub write data has been sent */
679 ret = wait_for_bit_le32((const void *)(umctl2_base +
680 DDR4_SBRSTAT_OFFSET), DDR4_SBRSTAT_SCRUB_BUSY,
681 false, TIMEOUT_5000MS, false);
682 if (ret) {
683 debug("%s: Timeout while waiting for", __func__);
684 debug(" sending all scrub data\n");
685 return ret;
686 }
687
688 /* LPDDR4 supports inline ECC only */
689 if (umctl2_type != DDRTYPE_DDR4) {
 690 /*
 691 * Set all regions as protected; this is required for the
 692 * scrubber to initialize the whole LPDDR4 except the ECC region
 693 */
694 writel(((ONE_EIGHT <<
695 LPDDR4_ECCCFG0_ECC_REGION_MAP_GRANU_SHIFT) |
696 (ALL_PROTECTED << LPDDR4_ECCCFG0_ECC_REGION_MAP_SHIFT)),
697 umctl2_base + DDR4_ECCCFG0_OFFSET);
698 }
699
700 /* Scrub_burst = 1, scrub_mode = 1(performs writes) */
701 writel(DDR4_SBRCTL_SCRUB_BURST_1 | DDR4_SBRCTL_SCRUB_WRITE,
702 umctl2_base + DDR4_SBRCTL_OFFSET);
703
704 /* Zeroing whole DDR */
705 writel(0, umctl2_base + DDR4_SBRWDATA0_OFFSET);
706 writel(0, umctl2_base + DDR4_SBRSTART0_OFFSET);
707 if (umctl2_type == DDRTYPE_DDR4) {
708 writel(0, umctl2_base + DDR4_SBRWDATA1_OFFSET);
709 writel(0, umctl2_base + DDR4_SBRSTART1_OFFSET);
710 }
711 writel(0, umctl2_base + DDR4_SBRRANGE0_OFFSET);
712 writel(0, umctl2_base + DDR4_SBRRANGE1_OFFSET);
713
 714 /* Enable the scrubber */
 715 setbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
 716 /* Poll until all scrub write commands have been sent */
717 ret = wait_for_bit_le32((const void *)(umctl2_base +
718 DDR4_SBRSTAT_OFFSET), DDR4_SBRSTAT_SCRUB_DONE,
719 true, TIMEOUT_5000MS, false);
720 if (ret) {
721 debug("%s: Timeout while waiting for", __func__);
722 debug(" sending all scrub commands\n");
723 return ret;
724 }
725
 726 /* Poll until all scrub write data has been sent */
727 ret = wait_for_bit_le32((const void *)(umctl2_base +
728 DDR4_SBRSTAT_OFFSET), DDR4_SBRSTAT_SCRUB_BUSY,
729 false, TIMEOUT_5000MS, false);
730 if (ret) {
731 printf("%s: Timeout while waiting for", __func__);
732 printf(" sending all scrub data\n");
733 return ret;
734 }
735
736 /* Disables scrubber */
737 clrbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
738
739 /* Restore user settings */
740 writel(backup[0], umctl2_base + DDR4_SBRCTL_OFFSET);
741 writel(backup[1], umctl2_base + DDR4_SBRWDATA0_OFFSET);
742 writel(backup[2], umctl2_base + DDR4_SBRSTART0_OFFSET);
743 if (umctl2_type == DDRTYPE_DDR4) {
744 writel(backup[3], umctl2_base + DDR4_SBRWDATA1_OFFSET);
745 writel(backup[4], umctl2_base + DDR4_SBRSTART1_OFFSET);
746 }
747 writel(backup[5], umctl2_base + DDR4_SBRRANGE0_OFFSET);
748 writel(backup[6], umctl2_base + DDR4_SBRRANGE1_OFFSET);
749 writel(backup[7], umctl2_base + DDR4_ECCCFG0_OFFSET);
750 writel(backup[8], umctl2_base + DDR4_ECCCFG1_OFFSET);
751
 752 /* Enable ECC scrubbing if the scrubber was not left in write mode */
753 if (!(readl(umctl2_base + DDR4_SBRCTL_OFFSET) &
754 DDR4_SBRCTL_SCRUB_WRITE)) {
755 /* Enables scrubber */
756 setbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET,
757 DDR4_SBRCTL_SCRUB_EN);
758 }
759
760 return 0;
761}
762
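/*
 * Replay a handoff table of offset/value pairs onto the given base
 * address. PHY offsets are doubled and written as 16-bit registers;
 * controller offsets are written as 32-bit registers.
 */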
763static void handoff_process(struct ddr_handoff *ddr_handoff_info,
764 phys_addr_t handoff_base, size_t length,
765 phys_addr_t base)
766{
767 u32 handoff_table[length];
768 u32 i, value = 0;
769
770 /* Execute configuration handoff */
771 socfpga_handoff_read((void *)handoff_base, handoff_table, length);
772
773 for (i = 0; i < length; i = i + 2) {
774 debug("%s: wr = 0x%08x ", __func__, handoff_table[i + 1]);
775 if (ddr_handoff_info && base == ddr_handoff_info->phy_base) {
 776 /*
 777 * Convert the PHY odd offset to an even offset that is
 778 * supported by the ARM processor.
 779 */
780 value = handoff_table[i] << 1;
781
782 writew(handoff_table[i + 1],
783 (uintptr_t)(value + base));
784 debug("rd = 0x%08x ",
785 readw((uintptr_t)(value + base)));
786 debug("PHY offset: 0x%08x ", handoff_table[i + 1]);
787 } else {
788 value = handoff_table[i];
789 writel(handoff_table[i + 1], (uintptr_t)(value +
790 base));
791 debug("rd = 0x%08x ",
792 readl((uintptr_t)(value + base)));
793 }
794
795 debug("Absolute addr: 0x%08llx, APB offset: 0x%08x\n",
796 value + base, value);
797 }
798}
799
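/*
 * Bring up one memory controller from handoff data: block SDRAM traffic,
 * enter self-refresh, replay the umctl2 handoff table, then release
 * self-refresh. PWRCTL (and, for LPDDR4, INIT0.skip_dram_init) is saved
 * into user_backup so it can be restored once the DDR is up and running.
 */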
800static int init_umctl2(phys_addr_t umctl2_handoff_base,
801 phys_addr_t umctl2_base, enum ddr_type umctl2_type,
802 size_t umctl2_handoff_length,
803 u32 *user_backup)
804{
805 int ret;
806
807 if (umctl2_type == DDRTYPE_DDR4)
808 printf("Initializing DDR4 controller ...\n");
809 else if (umctl2_type == DDRTYPE_LPDDR4_0)
810 printf("Initializing LPDDR4_0 controller ...\n");
811 else if (umctl2_type == DDRTYPE_LPDDR4_1)
812 printf("Initializing LPDDR4_1 controller ...\n");
813
814 /* Prevent controller from issuing read/write to SDRAM */
815 setbits_le32(umctl2_base + DDR4_DBG1_OFFSET, DDR4_DBG1_DISDQ);
816
817 /* Put SDRAM into self-refresh */
818 setbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, DDR4_PWRCTL_SELFREF_EN);
819
 820 /* Enable quasi-dynamic programming of the controller registers */
821 clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
822
823 /* Ensure the controller is in initialization mode */
824 ret = wait_for_bit_le32((const void *)(umctl2_base + DDR4_STAT_OFFSET),
825 DDR4_STAT_OPERATING_MODE, false, TIMEOUT_200MS,
826 false);
827 if (ret) {
828 debug("%s: Timeout while waiting for", __func__);
829 debug(" init operating mode\n");
830 return ret;
831 }
832
833 debug("%s: UMCTL2 handoff base address = 0x%p table length = 0x%08x\n",
834 __func__, (u32 *)umctl2_handoff_base,
835 (u32)umctl2_handoff_length);
836
837 handoff_process(NULL, umctl2_handoff_base, umctl2_handoff_length,
838 umctl2_base);
839
840 /* Backup user settings, restore after DDR up running */
841 *user_backup = readl(umctl2_base + DDR4_PWRCTL_OFFSET);
842
 843 /* Disable self-refresh */
844 clrbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, DDR4_PWRCTL_SELFREF_EN);
845
846 if (umctl2_type == DDRTYPE_LPDDR4_0 ||
847 umctl2_type == DDRTYPE_LPDDR4_1) {
848 /* Setting selfref_sw to 1, based on lpddr4 requirement */
849 setbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET,
850 DDR4_PWRCTL_SELFREF_SW);
851
852 /* Backup user settings, restore after DDR up running */
853 user_backup++;
854 *user_backup = readl(umctl2_base + DDR4_INIT0_OFFSET) &
855 DDR4_INIT0_SKIP_RAM_INIT;
856
857 /*
858 * Setting INIT0.skip_dram_init to 0x3, based on lpddr4
859 * requirement
860 */
861 setbits_le32(umctl2_base + DDR4_INIT0_OFFSET,
862 DDR4_INIT0_SKIP_RAM_INIT);
863 }
864
865 /* Complete quasi-dynamic register programming */
866 setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
867
 868 /* Allow the controller to issue read/write to SDRAM again */
869 clrbits_le32(umctl2_base + DDR4_DBG1_OFFSET, DDR4_DBG1_DISDQ);
870
871 return 0;
872}
873
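/*
 * Prepare the controller before the PHY handoff is replayed: disable auto
 * refresh (unless DDR4 retry is enabled), disable the low-power features
 * and mask dfi_init_complete_en under quasi-dynamic programming.
 */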
874static int phy_pre_handoff_config(phys_addr_t umctl2_base,
875 enum ddr_type umctl2_type)
876{
877 int ret;
878 u32 value;
879
880 if (umctl2_type == DDRTYPE_DDR4) {
 881 /* Check whether DDR4 retry is enabled */
882 value = readl(umctl2_base + DDR4_CRCPARCTL1_OFFSET) &
883 DDR4_CRCPARCTL1_CRC_PARITY_RETRY_ENABLE;
884
885 if (value) {
886 debug("%s: DDR4 retry is enabled\n", __func__);
887 debug("%s: Disable auto refresh is not supported\n",
888 __func__);
889 } else {
890 /* Disable auto refresh */
891 setbits_le32(umctl2_base + DDR4_RFSHCTL3_OFFSET,
892 DDR4_RFSHCTL3_DIS_AUTO_REFRESH);
893 }
894 }
895
 896 /* Disable selfref_en & powerdown_en, never disable dfi dram clk */
897 clrbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET,
898 DDR4_PWRCTL_EN_DFI_DRAM_CLK_DISABLE |
899 DDR4_PWRCTL_POWERDOWN_EN | DDR4_PWRCTL_SELFREF_EN);
900
 901 /* Enable quasi-dynamic programming of the controller registers */
902 clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
903
904 ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
905 if (ret)
906 return ret;
907
908 /* Masking dfi init complete */
909 clrbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET,
910 DDR4_DFIMISC_DFI_INIT_COMPLETE_EN);
911
912 /* Complete quasi-dynamic register programming */
913 setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
914
915 /* Polling programming done */
916 ret = wait_for_bit_le32((const void *)(umctl2_base +
917 DDR4_SWSTAT_OFFSET), DDR4_SWSTAT_SW_DONE_ACK,
918 true, TIMEOUT_200MS, false);
919 if (ret) {
920 debug("%s: Timeout while waiting for", __func__);
921 debug(" programming done\n");
922 }
923
924 return ret;
925}
926
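/*
 * Configure the DDR PHY: run the pre-handoff controller configuration for
 * each active controller, then replay the PHY handoff table.
 */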
927static int init_phy(struct ddr_handoff *ddr_handoff_info)
928{
929 int ret;
930
931 printf("Initializing DDR PHY ...\n");
932
933 if (ddr_handoff_info->cntlr_t == DDRTYPE_DDR4 ||
934 ddr_handoff_info->cntlr_t == DDRTYPE_LPDDR4_0) {
935 ret = phy_pre_handoff_config(ddr_handoff_info->cntlr_base,
936 ddr_handoff_info->cntlr_t);
937 if (ret)
938 return ret;
939 }
940
941 if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) {
942 ret = phy_pre_handoff_config
943 (ddr_handoff_info->cntlr2_base,
944 ddr_handoff_info->cntlr2_t);
945 if (ret)
946 return ret;
947 }
948
949 /* Execute PHY configuration handoff */
950 handoff_process(ddr_handoff_info, ddr_handoff_info->phy_handoff_base,
951 ddr_handoff_info->phy_handoff_length,
952 ddr_handoff_info->phy_base);
953
954 printf("DDR PHY configuration is completed\n");
955
956 return 0;
957}
958
959static void phy_init_engine(struct ddr_handoff *handoff)
960{
961 printf("Load PHY Init Engine ...\n");
962
963 /* Execute PIE production code handoff */
964 handoff_process(handoff, handoff->phy_engine_handoff_base,
965 handoff->phy_engine_handoff_length, handoff->phy_base);
966
967 printf("End of loading PHY Init Engine\n");
968}
969
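/*
 * Walk the DDR handoff blob and fill in struct ddr_handoff: memory reset
 * manager base, one or two controller sections, the PHY section, the PHY
 * init engine section and the training IMEM/DMEM locations. Returns
 * -ENOEXEC when the handoff layout does not match the expected order.
 */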
970int populate_ddr_handoff(struct ddr_handoff *handoff)
971{
972 phys_addr_t next_section_header;
973
974 /* DDR handoff */
975 handoff->mem_reset_base = SOC64_HANDOFF_DDR_MEMRESET_BASE;
976 debug("%s: DDR memory reset base = 0x%x\n", __func__,
977 (u32)handoff->mem_reset_base);
978 debug("%s: DDR memory reset address = 0x%x\n", __func__,
979 readl(handoff->mem_reset_base));
980
981 /* Beginning of DDR controller handoff */
982 handoff->cntlr_handoff_base = SOC64_HANDOFF_DDR_UMCTL2_SECTION;
983 debug("%s: cntlr handoff base = 0x%x\n", __func__,
984 (u32)handoff->cntlr_handoff_base);
985
986 /* Get 1st DDR type */
987 handoff->cntlr_t = get_ddr_type(handoff->cntlr_handoff_base +
988 SOC64_HANDOFF_DDR_UMCTL2_TYPE_OFFSET);
989 if (handoff->cntlr_t == DDRTYPE_LPDDR4_1 ||
990 handoff->cntlr_t == DDRTYPE_UNKNOWN) {
991 debug("%s: Wrong DDR handoff format, the 1st DDR ", __func__);
992 debug("type must be DDR4 or LPDDR4_0\n");
993 return -ENOEXEC;
994 }
995
996 /* 1st cntlr base physical address */
997 handoff->cntlr_base = readl(handoff->cntlr_handoff_base +
998 SOC64_HANDOFF_DDR_UMCTL2_BASE_ADDR_OFFSET);
999 debug("%s: cntlr base = 0x%x\n", __func__, (u32)handoff->cntlr_base);
1000
1001 /* Get the total length of DDR cntlr handoff section */
1002 handoff->cntlr_total_length = readl(handoff->cntlr_handoff_base +
1003 SOC64_HANDOFF_OFFSET_LENGTH);
1004 debug("%s: Umctl2 total length in byte = 0x%x\n", __func__,
1005 (u32)handoff->cntlr_total_length);
1006
1007 /* Get the length of user setting data in DDR cntlr handoff section */
1008 handoff->cntlr_handoff_length = socfpga_get_handoff_size((void *)
1009 handoff->cntlr_handoff_base);
1010 debug("%s: Umctl2 handoff length in word(32-bit) = 0x%x\n", __func__,
1011 (u32)handoff->cntlr_handoff_length);
1012
1013 /* Wrong format on user setting data */
1014 if (handoff->cntlr_handoff_length < 0) {
1015 debug("%s: Wrong format on user setting data\n", __func__);
1016 return -ENOEXEC;
1017 }
1018
1019 /* Get the next handoff section address */
1020 next_section_header = handoff->cntlr_handoff_base +
1021 handoff->cntlr_total_length;
1022 debug("%s: Next handoff section header location = 0x%llx\n", __func__,
1023 next_section_header);
1024
 1025 /*
 1026 * Check whether the next handoff section is a controller or PHY
 1027 * section, and adjust the subsequent parsing accordingly
 1028 */
1029 if (readl(next_section_header) == SOC64_HANDOFF_DDR_UMCTL2_MAGIC) {
1030 /* Get the next cntlr handoff section address */
1031 handoff->cntlr2_handoff_base = next_section_header;
1032 debug("%s: umctl2 2nd handoff base = 0x%x\n", __func__,
1033 (u32)handoff->cntlr2_handoff_base);
1034
1035 /* Get 2nd DDR type */
1036 handoff->cntlr2_t = get_ddr_type(handoff->cntlr2_handoff_base +
1037 SOC64_HANDOFF_DDR_UMCTL2_TYPE_OFFSET);
1038 if (handoff->cntlr2_t == DDRTYPE_LPDDR4_0 ||
1039 handoff->cntlr2_t == DDRTYPE_UNKNOWN) {
1040 debug("%s: Wrong DDR handoff format, the 2nd DDR ",
1041 __func__);
1042 debug("type must be LPDDR4_1\n");
1043 return -ENOEXEC;
1044 }
1045
1046 /* 2nd umctl2 base physical address */
1047 handoff->cntlr2_base =
1048 readl(handoff->cntlr2_handoff_base +
1049 SOC64_HANDOFF_DDR_UMCTL2_BASE_ADDR_OFFSET);
1050 debug("%s: cntlr2 base = 0x%x\n", __func__,
1051 (u32)handoff->cntlr2_base);
1052
1053 /* Get the total length of 2nd DDR umctl2 handoff section */
1054 handoff->cntlr2_total_length =
1055 readl(handoff->cntlr2_handoff_base +
1056 SOC64_HANDOFF_OFFSET_LENGTH);
1057 debug("%s: Umctl2_2nd total length in byte = 0x%x\n", __func__,
1058 (u32)handoff->cntlr2_total_length);
1059
1060 /*
1061 * Get the length of user setting data in DDR umctl2 handoff
1062 * section
1063 */
1064 handoff->cntlr2_handoff_length =
1065 socfpga_get_handoff_size((void *)
1066 handoff->cntlr2_handoff_base);
1067 debug("%s: cntlr2 handoff length in word(32-bit) = 0x%x\n",
1068 __func__,
1069 (u32)handoff->cntlr2_handoff_length);
1070
1071 /* Wrong format on user setting data */
1072 if (handoff->cntlr2_handoff_length < 0) {
1073 debug("%s: Wrong format on umctl2 user setting data\n",
1074 __func__);
1075 return -ENOEXEC;
1076 }
1077
1078 /* Get the next handoff section address */
1079 next_section_header = handoff->cntlr2_handoff_base +
1080 handoff->cntlr2_total_length;
1081 debug("%s: Next handoff section header location = 0x%llx\n",
1082 __func__, next_section_header);
1083 }
1084
 1085 /* Check whether the next handoff section is the PHY section */
1086 if (readl(next_section_header) == SOC64_HANDOFF_DDR_PHY_MAGIC) {
1087 /* DDR PHY handoff */
1088 handoff->phy_handoff_base = next_section_header;
1089 debug("%s: PHY handoff base = 0x%x\n", __func__,
1090 (u32)handoff->phy_handoff_base);
1091
1092 /* PHY base physical address */
1093 handoff->phy_base = readl(handoff->phy_handoff_base +
1094 SOC64_HANDOFF_DDR_PHY_BASE_OFFSET);
1095 debug("%s: PHY base = 0x%x\n", __func__,
1096 (u32)handoff->phy_base);
1097
1098 /* Get the total length of PHY handoff section */
1099 handoff->phy_total_length = readl(handoff->phy_handoff_base +
1100 SOC64_HANDOFF_OFFSET_LENGTH);
1101 debug("%s: PHY total length in byte = 0x%x\n", __func__,
1102 (u32)handoff->phy_total_length);
1103
1104 /*
1105 * Get the length of user setting data in DDR PHY handoff
1106 * section
1107 */
1108 handoff->phy_handoff_length = socfpga_get_handoff_size((void *)
1109 handoff->phy_handoff_base);
1110 debug("%s: PHY handoff length in word(32-bit) = 0x%x\n",
1111 __func__, (u32)handoff->phy_handoff_length);
1112
1113 /* Wrong format on PHY user setting data */
1114 if (handoff->phy_handoff_length < 0) {
1115 debug("%s: Wrong format on PHY user setting data\n",
1116 __func__);
1117 return -ENOEXEC;
1118 }
1119
1120 /* Get the next handoff section address */
1121 next_section_header = handoff->phy_handoff_base +
1122 handoff->phy_total_length;
1123 debug("%s: Next handoff section header location = 0x%llx\n",
1124 __func__, next_section_header);
1125 } else {
1126 debug("%s: Wrong format for DDR handoff, expect PHY",
1127 __func__);
1128 debug(" handoff section after umctl2 handoff section\n");
1129 return -ENOEXEC;
1130 }
1131
 1132 /* Check whether the next handoff section is the PHY init engine section */
1133 if (readl(next_section_header) ==
1134 SOC64_HANDOFF_DDR_PHY_INIT_ENGINE_MAGIC) {
1135 /* DDR PHY Engine handoff */
1136 handoff->phy_engine_handoff_base = next_section_header;
1137 debug("%s: PHY init engine handoff base = 0x%x\n", __func__,
1138 (u32)handoff->phy_engine_handoff_base);
1139
1140 /* Get the total length of PHY init engine handoff section */
1141 handoff->phy_engine_total_length =
1142 readl(handoff->phy_engine_handoff_base +
1143 SOC64_HANDOFF_OFFSET_LENGTH);
1144 debug("%s: PHY engine total length in byte = 0x%x\n", __func__,
1145 (u32)handoff->phy_engine_total_length);
1146
1147 /*
1148 * Get the length of user setting data in DDR PHY init engine
1149 * handoff section
1150 */
1151 handoff->phy_engine_handoff_length =
1152 socfpga_get_handoff_size((void *)
1153 handoff->phy_engine_handoff_base);
1154 debug("%s: PHY engine handoff length in word(32-bit) = 0x%x\n",
1155 __func__, (u32)handoff->phy_engine_handoff_length);
1156
1157 /* Wrong format on PHY init engine setting data */
1158 if (handoff->phy_engine_handoff_length < 0) {
1159 debug("%s: Wrong format on PHY init engine ",
1160 __func__);
1161 debug("user setting data\n");
1162 return -ENOEXEC;
1163 }
1164 } else {
1165 debug("%s: Wrong format for DDR handoff, expect PHY",
1166 __func__);
 1167 debug(" init engine handoff section after PHY");
 1168 debug(" handoff section\n");
1169 return -ENOEXEC;
1170 }
1171
1172 handoff->train_imem_base = handoff->phy_base +
1173 DDR_PHY_TRAIN_IMEM_OFFSET;
1174 debug("%s: PHY train IMEM base = 0x%x\n",
1175 __func__, (u32)handoff->train_imem_base);
1176
1177 handoff->train_dmem_base = handoff->phy_base +
1178 DDR_PHY_TRAIN_DMEM_OFFSET;
1179 debug("%s: PHY train DMEM base = 0x%x\n",
1180 __func__, (u32)handoff->train_dmem_base);
1181
1182 handoff->train_imem_length = SOC64_HANDOFF_DDR_TRAIN_IMEM_LENGTH;
1183 debug("%s: PHY train IMEM length = 0x%x\n",
1184 __func__, (u32)handoff->train_imem_length);
1185
1186 handoff->train_dmem_length = SOC64_HANDOFF_DDR_TRAIN_DMEM_LENGTH;
1187 debug("%s: PHY train DMEM length = 0x%x\n",
1188 __func__, (u32)handoff->train_dmem_length);
1189
1190 return 0;
1191}
1192
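/* Enable the "mem_clk" clock from the device tree before DDR init. */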
1193int enable_ddr_clock(struct udevice *dev)
1194{
1195 struct clk *ddr_clk;
1196 int ret;
1197
1198 /* Enable clock before init DDR */
1199 ddr_clk = devm_clk_get(dev, "mem_clk");
1200 if (!IS_ERR(ddr_clk)) {
1201 ret = clk_enable(ddr_clk);
1202 if (ret) {
1203 printf("%s: Failed to enable DDR clock\n", __func__);
1204 return ret;
1205 }
1206 } else {
1207 ret = PTR_ERR(ddr_clk);
1208 debug("%s: Failed to get DDR clock from dts\n", __func__);
1209 return ret;
1210 }
1211
1212 printf("%s: DDR clock is enabled\n", __func__);
1213
1214 return 0;
1215}
1216
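/* Start the DFI initialization sequence via DFIMISC.dfi_init_start. */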
1217static int ddr_start_dfi_init(phys_addr_t umctl2_base,
1218 enum ddr_type umctl2_type)
1219{
1220 int ret;
1221
1222 debug("%s: Start DFI init\n", __func__);
1223
 1224 /* Enable quasi-dynamic programming of controller registers */
1225 clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1226
1227 ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
1228 if (ret)
1229 return ret;
1230
1231 /* Start DFI init sequence */
1232 setbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET,
1233 DDR4_DFIMISC_DFI_INIT_START);
1234
1235 /* Complete quasi-dynamic register programming */
1236 setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1237
1238 /* Polling programming done */
1239 ret = wait_for_bit_le32((const void *)(umctl2_base +
1240 DDR4_SWSTAT_OFFSET),
1241 DDR4_SWSTAT_SW_DONE_ACK, true,
1242 TIMEOUT_200MS, false);
1243 if (ret) {
1244 debug("%s: Timeout while waiting for", __func__);
1245 debug(" programming done\n");
1246 }
1247
1248 return ret;
1249}
1250
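/* Wait for DFISTAT.dfi_init_complete, then clear dfi_init_start again. */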
1251static int ddr_check_dfi_init_complete(phys_addr_t umctl2_base,
1252 enum ddr_type umctl2_type)
1253{
1254 int ret;
1255
1256 /* Polling DFI init complete */
1257 ret = wait_for_bit_le32((const void *)(umctl2_base +
1258 DDR4_DFISTAT_OFFSET),
1259 DDR4_DFI_INIT_COMPLETE, true,
1260 TIMEOUT_200MS, false);
1261 if (ret) {
1262 debug("%s: Timeout while waiting for", __func__);
1263 debug(" DFI init done\n");
1264 return ret;
1265 }
1266
1267 debug("%s: DFI init completed.\n", __func__);
1268
 1269 /* Enable quasi-dynamic programming of controller registers */
1270 clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1271
1272 ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
1273 if (ret)
1274 return ret;
1275
1276 /* Stop DFI init sequence */
1277 clrbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET,
1278 DDR4_DFIMISC_DFI_INIT_START);
1279
1280 /* Complete quasi-dynamic register programming */
1281 setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1282
1283 /* Polling programming done */
1284 ret = wait_for_bit_le32((const void *)(umctl2_base +
1285 DDR4_SWSTAT_OFFSET),
1286 DDR4_SWSTAT_SW_DONE_ACK, true,
1287 TIMEOUT_200MS, false);
1288 if (ret) {
1289 debug("%s: Timeout while waiting for", __func__);
1290 debug(" programming done\n");
1291 return ret;
1292 }
1293
1294 debug("%s:DDR programming done\n", __func__);
1295
1296 return ret;
1297}
1298
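/*
 * Trigger SDRAM initialization: unmask dfi_init_complete_en and take the
 * controller out of software self-refresh under quasi-dynamic programming.
 */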
1299static int ddr_trigger_sdram_init(phys_addr_t umctl2_base,
1300 enum ddr_type umctl2_type)
1301{
1302 int ret;
1303
 1304 /* Enable quasi-dynamic programming of controller registers */
1305 clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1306
1307 ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
1308 if (ret)
1309 return ret;
1310
1311 /* Unmasking dfi init complete */
1312 setbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET,
1313 DDR4_DFIMISC_DFI_INIT_COMPLETE_EN);
1314
1315 /* Software exit from self-refresh */
1316 clrbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, DDR4_PWRCTL_SELFREF_SW);
1317
1318 /* Complete quasi-dynamic register programming */
1319 setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1320
1321 /* Polling programming done */
1322 ret = wait_for_bit_le32((const void *)(umctl2_base +
1323 DDR4_SWSTAT_OFFSET),
1324 DDR4_SWSTAT_SW_DONE_ACK, true,
1325 TIMEOUT_200MS, false);
1326 if (ret) {
1327 debug("%s: Timeout while waiting for", __func__);
1328 debug(" programming done\n");
1329 return ret;
1330 }
1331
1332 debug("%s:DDR programming done\n", __func__);
1333 return ret;
1334}
1335
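/*
 * Wait for the controller to reach normal operating mode, re-enable auto
 * refresh and, when ECC is enabled, initialize memory via the scrubber.
 */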
1336static int ddr_post_handoff_config(phys_addr_t umctl2_base,
1337 enum ddr_type umctl2_type)
1338{
1339 int ret = 0;
1340 u32 value;
1341 u32 start = get_timer(0);
1342
1343 do {
1344 if (get_timer(start) > TIMEOUT_200MS) {
1345 debug("%s: Timeout while waiting for",
1346 __func__);
1347 debug(" DDR enters normal operating mode\n");
1348 return -ETIMEDOUT;
1349 }
1350
1351 udelay(1);
1352 WATCHDOG_RESET();
1353
1354 /* Polling until SDRAM entered normal operating mode */
1355 value = readl(umctl2_base + DDR4_STAT_OFFSET) &
1356 DDR4_STAT_OPERATING_MODE;
1357 } while (value != OPM_NORMAL);
1358
1359 printf("DDR entered normal operating mode\n");
1360
1361 /* Enabling auto refresh */
1362 clrbits_le32(umctl2_base + DDR4_RFSHCTL3_OFFSET,
1363 DDR4_RFSHCTL3_DIS_AUTO_REFRESH);
1364
 1365 /* Check whether ECC is enabled */
1366 value = readl(umctl2_base + DDR4_ECCCFG0_OFFSET) & DDR4_ECC_MODE;
1367 if (value) {
1368 printf("ECC is enabled\n");
1369 ret = scrubber_ddr_config(umctl2_base, umctl2_type);
1370 if (ret)
1371 printf("Failed to enable ECC\n");
1372 }
1373
1374 return ret;
1375}
1376
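/*
 * Load the training firmware into the PHY: assert memory reset protection,
 * open the PHY configuration registers, copy and verify the IMEM and DMEM
 * images, then isolate the APB interface from the internal CSRs again.
 */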
1377static int configure_training_firmware(struct ddr_handoff *ddr_handoff_info,
1378 const void *train_imem,
1379 const void *train_dmem)
1380{
1381 int ret = 0;
1382
1383 printf("Configuring training firmware ...\n");
1384
1385 /* Reset SDRAM */
1386 writew(DDR_PHY_PROTECT_MEMRESET,
1387 (uintptr_t)(ddr_handoff_info->phy_base +
1388 DDR_PHY_MEMRESETL_OFFSET));
1389
1390 /* Enable access to the PHY configuration registers */
1391 clrbits_le16(ddr_handoff_info->phy_base + DDR_PHY_APBONLY0_OFFSET,
1392 DDR_PHY_MICROCONTMUXSEL);
1393
1394 /* Copy train IMEM bin */
1395 memcpy((void *)ddr_handoff_info->train_imem_base, train_imem,
1396 ddr_handoff_info->train_imem_length);
1397
1398 ret = memcmp((void *)ddr_handoff_info->train_imem_base, train_imem,
1399 ddr_handoff_info->train_imem_length);
1400 if (ret) {
1401 debug("%s: Failed to copy train IMEM binary\n", __func__);
1402 /* Isolate the APB access from internal CSRs */
1403 setbits_le16(ddr_handoff_info->phy_base +
1404 DDR_PHY_APBONLY0_OFFSET, DDR_PHY_MICROCONTMUXSEL);
1405 return ret;
1406 }
1407
1408 memcpy((void *)ddr_handoff_info->train_dmem_base, train_dmem,
1409 ddr_handoff_info->train_dmem_length);
1410
1411 ret = memcmp((void *)ddr_handoff_info->train_dmem_base, train_dmem,
1412 ddr_handoff_info->train_dmem_length);
1413 if (ret)
1414 debug("%s: Failed to copy train DMEM binary\n", __func__);
1415
1416 /* Isolate the APB access from internal CSRs */
1417 setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_APBONLY0_OFFSET,
1418 DDR_PHY_MICROCONTMUXSEL);
1419
1420 return ret;
1421}
1422
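/*
 * Start SDRAM calibration: arm the PHY mailbox protocol, then release the
 * ARC processor from reset and stall so the training firmware executes.
 */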
1423static void calibrating_sdram(struct ddr_handoff *ddr_handoff_info)
1424{
1425 /* Init mailbox protocol - set 1 to DCTWRITEPROT[0] */
1426 setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
1427 DDR_PHY_DCTWRITEPROT);
1428
1429 /* Init mailbox protocol - set 1 to UCTWRITEPROT[0] */
1430 setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_UCTWRITEPROT_OFFSET,
1431 DDR_PHY_UCTWRITEPROT);
1432
 1433 /* Reset and stall the ARC processor */
1434 setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_MICRORESET_OFFSET,
1435 DDR_PHY_MICRORESET_RESET | DDR_PHY_MICRORESET_STALL);
1436
1437 /* Release ARC processor */
1438 clrbits_le16(ddr_handoff_info->phy_base + DDR_PHY_MICRORESET_OFFSET,
1439 DDR_PHY_MICRORESET_RESET);
1440
1441 /* Starting PHY firmware execution */
1442 clrbits_le16(ddr_handoff_info->phy_base + DDR_PHY_MICRORESET_OFFSET,
1443 DDR_PHY_MICRORESET_STALL);
1444}
1445
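/*
 * Receive one mailbox message from the training firmware: wait for a
 * message, read the 16-bit ID (plus the upper 16 bits for streaming
 * messages), then acknowledge it and complete the protocol handshake.
 */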
1446static int get_mail(struct ddr_handoff *handoff, enum message_mode mode,
1447 u32 *message_id)
1448{
1449 int ret;
1450
1451 /* Polling major messages from PMU */
1452 ret = wait_for_bit_le16((const void *)(handoff->phy_base +
1453 DDR_PHY_UCTSHADOWREGS_OFFSET),
1454 DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW,
1455 false, TIMEOUT_200MS, false);
1456 if (ret) {
1457 debug("%s: Timeout while waiting for",
1458 __func__);
1459 debug(" major messages from PMU\n");
1460 return ret;
1461 }
1462
1463 *message_id = readw((uintptr_t)(handoff->phy_base +
1464 DDR_PHY_UCTWRITEONLYSHADOW_OFFSET));
1465
1466 if (mode == STREAMING_MESSAGE)
1467 *message_id |= readw((uintptr_t)((handoff->phy_base +
1468 DDR_PHY_UCTDATWRITEONLYSHADOW_OFFSET))) <<
1469 SZ_16;
1470
1471 /* Ack the receipt of the major message */
1472 clrbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
1473 DDR_PHY_DCTWRITEPROT);
1474
1475 ret = wait_for_bit_le16((const void *)(handoff->phy_base +
1476 DDR_PHY_UCTSHADOWREGS_OFFSET),
1477 DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW,
1478 true, TIMEOUT_200MS, false);
1479 if (ret) {
1480 debug("%s: Timeout while waiting for",
1481 __func__);
 1482 debug(" acknowledgement of the major message\n");
1483 return ret;
1484 }
1485
1486 /* Complete protocol */
1487 setbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
1488 DDR_PHY_DCTWRITEPROT);
1489
1490 return ret;
1491}
1492
1493static int get_mail_streaming(struct ddr_handoff *handoff,
1494 enum message_mode mode, u32 *index)
1495{
1496 int ret;
1497
1498 *index = readw((uintptr_t)(handoff->phy_base +
1499 DDR_PHY_UCTWRITEONLYSHADOW_OFFSET));
1500
1501 if (mode == STREAMING_MESSAGE)
1502 *index |= readw((uintptr_t)((handoff->phy_base +
1503 DDR_PHY_UCTDATWRITEONLYSHADOW_OFFSET))) <<
1504 SZ_16;
1505
1506 /* Ack the receipt of the major message */
1507 clrbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
1508 DDR_PHY_DCTWRITEPROT);
1509
1510 ret = wait_for_bit_le16((const void *)(handoff->phy_base +
1511 DDR_PHY_UCTSHADOWREGS_OFFSET),
1512 DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW,
1513 true, TIMEOUT_200MS, false);
1514 if (ret) {
1515 debug("%s: Timeout while waiting for",
1516 __func__);
 1517 debug(" acknowledgement of the major message\n");
1518 return ret;
1519 }
1520
1521 /* Complete protocol */
1522 setbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
1523 DDR_PHY_DCTWRITEPROT);
1524
1525 return 0;
1526}
1527
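/*
 * A streaming message carries an argument count in its low halfword;
 * fetch and print that many follow-up arguments from the mailbox.
 */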
1528static int decode_streaming_message(struct ddr_handoff *ddr_handoff_info,
1529 u32 *streaming_index)
1530{
1531 int i = 0, ret;
1532 u32 temp;
1533
1534 temp = *streaming_index;
1535
1536 while (i < GET_LOWHW_DATA(temp)) {
1537 ret = get_mail(ddr_handoff_info, STREAMING_MESSAGE,
1538 streaming_index);
1539 if (ret)
1540 return ret;
1541
1542 printf("args[%d]: 0x%x ", i, *streaming_index);
1543 i++;
1544 }
1545
1546 return 0;
1547}
1548
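/*
 * Poll mailbox messages from the training firmware until it reports
 * training completed or failed; streaming messages are decoded and
 * printed along the way. A training failure hangs the board.
 */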
1549static int poll_for_training_complete(struct ddr_handoff *ddr_handoff_info)
1550{
1551 int ret;
1552 u32 message_id = 0;
1553 u32 streaming_index = 0;
1554
1555 do {
1556 ret = get_mail(ddr_handoff_info, MAJOR_MESSAGE, &message_id);
1557 if (ret)
1558 return ret;
1559
 1560 printf("Major message id = 0x%x\n", message_id);
1561
1562 if (message_id == FW_STREAMING_MSG_ID) {
1563 ret = get_mail_streaming(ddr_handoff_info,
1564 STREAMING_MESSAGE,
1565 &streaming_index);
1566 if (ret)
1567 return ret;
1568
 1569 printf("streaming index 0x%x : ", streaming_index);
1570
1571 decode_streaming_message(ddr_handoff_info,
1572 &streaming_index);
1573
1574 printf("\n");
1575 }
1576 } while ((message_id != FW_TRAINING_COMPLETED_STAT) &&
1577 (message_id != FW_TRAINING_FAILED_STAT));
1578
1579 if (message_id == FW_TRAINING_COMPLETED_STAT) {
1580 printf("DDR firmware training completed\n");
1581 } else if (message_id == FW_TRAINING_FAILED_STAT) {
1582 printf("DDR firmware training failed\n");
1583 hang();
1584 }
1585
1586 return 0;
1587}
1588
1589static void enable_phy_clk_for_csr_access(struct ddr_handoff *handoff,
1590 bool enable)
1591{
1592 if (enable) {
1593 /* Enable PHY clk */
1594 setbits_le16((uintptr_t)(handoff->phy_base +
1595 DDR_PHY_UCCLKHCLKENABLES_OFFSET),
1596 DDR_PHY_UCCLKHCLKENABLES_UCCLKEN |
1597 DDR_PHY_UCCLKHCLKENABLES_HCLKEN);
1598 } else {
1599 /* Disable PHY clk */
1600 clrbits_le16((uintptr_t)(handoff->phy_base +
1601 DDR_PHY_UCCLKHCLKENABLES_OFFSET),
1602 DDR_PHY_UCCLKHCLKENABLES_UCCLKEN |
1603 DDR_PHY_UCCLKHCLKENABLES_HCLKEN);
1604 }
1605}
1606
1607/* helper function for updating train result to umctl2 RANKCTL register */
1608static void set_cal_res_to_rankctrl(u32 reg_addr, u16 update_value,
1609 u32 mask, u32 msb_mask, u32 shift)
1610{
1611 u32 reg, value;
1612
1613 reg = readl((uintptr_t)reg_addr);
1614
1615 debug("max value divided by 2 is 0x%x\n", update_value);
 1616 debug("umctl2 register 0x%x value is 0x%x before ", reg_addr, reg);
1617 debug("update with train result\n");
1618
1619 value = (reg & mask) >> shift;
1620
1621 value += update_value + 3;
1622
 1623 /* If the value exceeds 0xF, set the corresponding *_gap_msb bit */
1624 if (value > 0xF)
1625 setbits_le32((u32 *)(uintptr_t)reg_addr, msb_mask);
1626 else
1627 clrbits_le32((u32 *)(uintptr_t)reg_addr, msb_mask);
1628
1629 reg = readl((uintptr_t)reg_addr);
1630
1631 value = (value << shift) & mask;
1632
1633 /* update register */
1634 writel((reg & (~mask)) | value, (uintptr_t)reg_addr);
1635
1636 reg = readl((uintptr_t)reg_addr);
 1637 debug("umctl2 register 0x%x value is 0x%x after ", reg_addr, reg);
1638 debug("update with train result\n");
1639}
1640
1641/* helper function for updating train result to register */
1642static void set_cal_res_to_reg(u32 reg_addr, u16 update_value, u32 mask,
1643 u32 shift)
1644{
1645 u32 reg, value;
1646
1647 reg = readl((uintptr_t)reg_addr);
1648
1649 debug("max value divided by 2 is 0x%x\n", update_value);
 1650 debug("umctl2 register 0x%x value is 0x%x before ", reg_addr, reg);
1651 debug("update with train result\n");
1652
1653 value = (reg & mask) >> shift;
1654
1655 value = ((value + update_value + 3) << shift) & mask;
1656
1657 /* update register */
1658 writel((reg & (~mask)) | value, (uintptr_t)reg_addr);
1659
1660 reg = readl((uintptr_t)reg_addr);
 1661 debug("umctl2 register 0x%x value is 0x%x after ", reg_addr, reg);
1662 debug("update with train result\n");
1663}
1664
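/*
 * Scan all DBYTE lanes and return the largest TxDqsDlyTg0 coarse delay
 * (bits [9:6]), starting from the given DBYTE0 register offset.
 */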
1665static u16 get_max_txdqsdlytg0_ux_p0(struct ddr_handoff *handoff, u32 reg,
1666 u8 numdbyte, u16 upd_val)
1667{
1668 u32 b_addr;
1669 u16 val;
1670 u8 byte;
1671
1672 /* Getting max value from DBYTEx TxDqsDlyTg0_ux_p0 */
1673 for (byte = 0; byte < numdbyte; byte++) {
1674 b_addr = byte << 13;
1675
1676 /* TxDqsDlyTg0[9:6] is the coarse delay */
1677 val = (readw((uintptr_t)(handoff->phy_base +
1678 reg + b_addr)) &
1679 DDR_PHY_TXDQDLYTG0_COARSE_DELAY) >>
1680 DDR_PHY_TXDQDLYTG0_COARSE_DELAY_SHIFT;
1681
1682 upd_val = max(val, upd_val);
1683 }
1684
1685 return upd_val;
1686}
1687
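/*
 * Fold the PHY training results (critical delay differences from the DMEM
 * mailbox data and the TxDqsDlyTg0 coarse delays) into the umctl2 timing
 * registers: RANKCTL read/write gaps, DRAMTMG2/DRAMTMG9 turnaround times
 * and DFITMG1.dfi_t_wrdata_delay.
 */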
1688static int set_cal_res_to_umctl2(struct ddr_handoff *handoff,
1689 phys_addr_t umctl2_base,
1690 enum ddr_type umctl2_type)
1691{
1692 int ret;
1693 u8 numdbyte = 0x8;
1694 u16 upd_val, val;
1695 u32 dramtmg2_reg_addr, rankctl_reg_addr, reg_addr;
1696
 1697 /* Enable quasi-dynamic programming of the controller registers */
1698 clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1699
1700 ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
1701 if (ret)
1702 return ret;
1703
1704 /* Enable access to the PHY configuration registers */
1705 clrbits_le16(handoff->phy_base + DDR_PHY_APBONLY0_OFFSET,
1706 DDR_PHY_MICROCONTMUXSEL);
1707
1708 if (umctl2_type == DDRTYPE_DDR4) {
1709 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1710 DMEM_MB_CDD_WW_1_0_OFFSET)));
1711
1712 upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1713 DMEM_MB_CDD_WW_0_1_OFFSET)));
1714 } else if (umctl2_type == DDRTYPE_LPDDR4_0) {
1715 val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1716 DMEM_MB_CDD_CHA_WW_1_0_OFFSET)));
1717
1718 upd_val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1719 DMEM_MB_CDD_CHA_WW_0_1_OFFSET)));
1720 } else if (umctl2_type == DDRTYPE_LPDDR4_1) {
1721 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1722 DMEM_MB_CDD_CHB_WW_1_0_OFFSET)));
1723
1724 upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1725 DMEM_MB_CDD_CHB_WW_0_1_OFFSET)));
1726 }
1727
1728 upd_val = max(val, upd_val);
1729 debug("max value is 0x%x\n", upd_val);
1730
1731 /* Divided by two is required when running in freq ratio 1:2 */
1732 if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO))
1733 upd_val = DIV_ROUND_CLOSEST(upd_val, 2);
1734
1735 debug("Update train value to umctl2 RANKCTL.diff_rank_wr_gap\n");
1736 rankctl_reg_addr = umctl2_base + DDR4_RANKCTL_OFFSET;
1737 /* Update train value to umctl2 RANKCTL.diff_rank_wr_gap */
1738 set_cal_res_to_rankctrl(rankctl_reg_addr, upd_val,
1739 DDR4_RANKCTL_DIFF_RANK_WR_GAP,
1740 DDR4_RANKCTL_DIFF_RANK_WR_GAP_MSB,
1741 DDR4_RANKCTL_DIFF_RANK_WR_GAP_SHIFT);
1742
 1743 debug("Update train value to umctl2 DRAMTMG2.WR2RD\n");
1744 dramtmg2_reg_addr = umctl2_base + DDR4_DRAMTMG2_OFFSET;
1745 /* Update train value to umctl2 dramtmg2.wr2rd */
1746 set_cal_res_to_reg(dramtmg2_reg_addr, upd_val, DDR4_DRAMTMG2_WR2RD, 0);
1747
1748 if (umctl2_type == DDRTYPE_DDR4) {
1749 debug("Update train value to umctl2 DRAMTMG9.W2RD_S\n");
1750 reg_addr = umctl2_base + DDR4_DRAMTMG9_OFFSET;
1751 /* Update train value to umctl2 dramtmg9.wr2rd_s */
1752 set_cal_res_to_reg(reg_addr, upd_val, DDR4_DRAMTMG9_W2RD_S, 0);
1753 }
1754
1755 if (umctl2_type == DDRTYPE_DDR4) {
1756 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1757 DMEM_MB_CDD_RR_1_0_OFFSET)));
1758
1759 upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1760 DMEM_MB_CDD_RR_0_1_OFFSET)));
1761 } else if (umctl2_type == DDRTYPE_LPDDR4_0) {
1762 val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1763 DMEM_MB_CDD_CHA_RR_1_0_OFFSET)));
1764
1765 upd_val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1766 DMEM_MB_CDD_CHA_RR_0_1_OFFSET)));
1767 } else if (umctl2_type == DDRTYPE_LPDDR4_1) {
1768 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1769 DMEM_MB_CDD_CHB_RR_1_0_OFFSET)));
1770
1771 upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1772 DMEM_MB_CDD_CHB_RR_0_1_OFFSET)));
1773 }
1774
1775 upd_val = max(val, upd_val);
1776 debug("max value is 0x%x\n", upd_val);
1777
1778 /* Divided by two is required when running in freq ratio 1:2 */
1779 if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO))
1780 upd_val = DIV_ROUND_CLOSEST(upd_val, 2);
1781
1782 debug("Update train value to umctl2 RANKCTL.diff_rank_rd_gap\n");
1783 /* Update train value to umctl2 RANKCTL.diff_rank_rd_gap */
1784 set_cal_res_to_rankctrl(rankctl_reg_addr, upd_val,
1785 DDR4_RANKCTL_DIFF_RANK_RD_GAP,
1786 DDR4_RANKCTL_DIFF_RANK_RD_GAP_MSB,
1787 DDR4_RANKCTL_DIFF_RANK_RD_GAP_SHIFT);
1788
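	/*
	 * Read-to-write CDDs are reported per rank pair (1_1, 1_0, 0_1 and
	 * 0_0); take the maximum across all four before updating
	 * DRAMTMG2.rd2wr.
	 */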
1789 if (umctl2_type == DDRTYPE_DDR4) {
1790 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1791 DMEM_MB_CDD_RW_1_1_OFFSET)));
1792
1793 upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1794 DMEM_MB_CDD_RW_1_0_OFFSET)));
1795
1796 upd_val = max(val, upd_val);
1797
1798 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1799 DMEM_MB_CDD_RW_0_1_OFFSET)));
1800
1801 upd_val = max(val, upd_val);
1802
1803 val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1804 DMEM_MB_CDD_RW_0_0_OFFSET)));
1805
1806 upd_val = max(val, upd_val);
1807 } else if (umctl2_type == DDRTYPE_LPDDR4_0) {
1808 val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1809 DMEM_MB_CDD_CHA_RW_1_1_OFFSET)));
1810
1811 upd_val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1812 DMEM_MB_CDD_CHA_RW_1_0_OFFSET)));
1813
1814 upd_val = max(val, upd_val);
1815
1816 val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1817 DMEM_MB_CDD_CHA_RW_0_1_OFFSET)));
1818
1819 upd_val = max(val, upd_val);
1820
1821 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1822 DMEM_MB_CDD_CHA_RW_0_0_OFFSET)));
1823
1824 upd_val = max(val, upd_val);
1825 } else if (umctl2_type == DDRTYPE_LPDDR4_1) {
1826 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1827 DMEM_MB_CDD_CHB_RW_1_1_OFFSET)));
1828
1829 upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1830 DMEM_MB_CDD_CHB_RW_1_0_OFFSET)));
1831
1832 upd_val = max(val, upd_val);
1833
1834 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1835 DMEM_MB_CDD_CHB_RW_0_1_OFFSET)));
1836
1837 upd_val = max(val, upd_val);
1838
1839 val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1840 DMEM_MB_CDD_CHB_RW_0_0_OFFSET)));
1841
1842 upd_val = max(val, upd_val);
1843 }
1844
1845 debug("max value is 0x%x\n", upd_val);
1846
1847	/* Dividing by two is required when running in freq ratio 1:2 */
1848 if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO))
1849 upd_val = DIV_ROUND_CLOSEST(upd_val, 2);
1850
1851 debug("Update train value to umctl2 dramtmg2.rd2wr\n");
1852 /* Update train value to umctl2 dramtmg2.rd2wr */
1853 set_cal_res_to_reg(dramtmg2_reg_addr, upd_val, DDR4_DRAMTMG2_RD2WR,
1854 DDR4_DRAMTMG2_RD2WR_SHIFT);
1855
1856	/* Check whether ECC is enabled; LPDDR4 uses inline ECC */
1857 val = readl(umctl2_base + DDR4_ECCCFG0_OFFSET) & DDR4_ECC_MODE;
1858 if (val && umctl2_type == DDRTYPE_DDR4)
1859 numdbyte = 0x9;
1860
1861 upd_val = 0;
1862
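	/*
	 * TxDqsDlyTg0 holds the write DQS delay trained per DBYTE (two
	 * instances, u0/u1, per byte lane); the largest value found is used
	 * below for DFITMG1.dfi_t_wrdata_delay.
	 */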
1863 /* Getting max value from DBYTEx TxDqsDlyTg0_u0_p0 */
1864 upd_val = get_max_txdqsdlytg0_ux_p0(handoff,
1865 DDR_PHY_DBYTE0_TXDQDLYTG0_U0_P0,
1866 numdbyte, upd_val);
1867
1868 /* Getting max value from DBYTEx TxDqsDlyTg0_u1_p0 */
1869 upd_val = get_max_txdqsdlytg0_ux_p0(handoff,
1870 DDR_PHY_DBYTE0_TXDQDLYTG0_U1_P0,
1871 numdbyte, upd_val);
1872
1873 debug("TxDqsDlyTg0 max value is 0x%x\n", upd_val);
1874
1875	/* Dividing by two is required when running in freq ratio 1:2 */
1876 if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO))
1877 upd_val = DIV_ROUND_CLOSEST(upd_val, 2);
1878
1879 reg_addr = umctl2_base + DDR4_DFITMG1_OFFSET;
1880 /* Update train value to umctl2 dfitmg1.dfi_wrdata_delay */
1881 set_cal_res_to_reg(reg_addr, upd_val, DDR4_DFITMG1_DFI_T_WRDATA_DELAY,
1882 DDR4_DFITMG1_DFI_T_WRDATA_SHIFT);
1883
1884 /* Complete quasi-dynamic register programming */
1885 setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1886
1887 /* Polling programming done */
1888 ret = wait_for_bit_le32((const void *)(umctl2_base +
1889 DDR4_SWSTAT_OFFSET), DDR4_SWSTAT_SW_DONE_ACK,
1890 true, TIMEOUT_200MS, false);
1891 if (ret) {
1892 debug("%s: Timeout while waiting for", __func__);
1893 debug(" programming done\n");
1894 }
1895
1896 /* Isolate the APB access from internal CSRs */
1897 setbits_le16(handoff->phy_base + DDR_PHY_APBONLY0_OFFSET,
1898 DDR_PHY_MICROCONTMUXSEL);
1899
1900 return ret;
1901}
1902
1903static int update_training_result(struct ddr_handoff *ddr_handoff_info)
1904{
1905 int ret = 0;
1906
1907 /* Updating training result to first DDR controller */
1908 if (ddr_handoff_info->cntlr_t == DDRTYPE_DDR4 ||
1909 ddr_handoff_info->cntlr_t == DDRTYPE_LPDDR4_0) {
1910 ret = set_cal_res_to_umctl2(ddr_handoff_info,
1911 ddr_handoff_info->cntlr_base,
1912 ddr_handoff_info->cntlr_t);
1913 if (ret) {
1914 debug("%s: Failed to update train result to ",
1915 __func__);
1916 debug("first DDR controller\n");
1917 return ret;
1918 }
1919 }
1920
1921 /* Updating training result to 2nd DDR controller */
1922 if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) {
1923 ret = set_cal_res_to_umctl2(ddr_handoff_info,
1924 ddr_handoff_info->cntlr2_base,
1925 ddr_handoff_info->cntlr2_t);
1926 if (ret) {
1927 debug("%s: Failed to update train result to ",
1928 __func__);
1929 debug("2nd DDR controller\n");
1930 }
1931 }
1932
1933 return ret;
1934}
1935
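/*
 * Run the full PHY training flow: load the 1D firmware, start calibration,
 * wait for completion, push the results into the controller(s), then repeat
 * with the 2D firmware.
 */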
1936static int start_ddr_calibration(struct ddr_handoff *ddr_handoff_info)
1937{
1938 int ret;
1939
1940	/* Load and configure the 1D training firmware */
1941 ret = configure_training_firmware(ddr_handoff_info,
1942 (const void *)SOC64_HANDOFF_DDR_TRAIN_IMEM_1D_SECTION,
1943 (const void *)SOC64_HANDOFF_DDR_TRAIN_DMEM_1D_SECTION);
1944 if (ret) {
1945 debug("%s: Failed to configure 1D training firmware\n",
1946 __func__);
1947 return ret;
1948 }
1949
1950 calibrating_sdram(ddr_handoff_info);
1951
1952 ret = poll_for_training_complete(ddr_handoff_info);
1953 if (ret) {
1954		debug("%s: FW training did not complete\n",
1955 __func__);
1956 return ret;
1957 }
1958
1959 /* Updating training result to DDR controller */
1960 ret = update_training_result(ddr_handoff_info);
1961 if (ret)
1962 return ret;
1963
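	/*
	 * 1D results are now in the controller; rerun the same load/run/
	 * poll sequence with the 2D firmware images, which typically refine
	 * the timing and voltage margins found by the 1D pass.
	 */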
1964	/* Load and configure the 2D training firmware */
1965 ret = configure_training_firmware(ddr_handoff_info,
1966 (const void *)SOC64_HANDOFF_DDR_TRAIN_IMEM_2D_SECTION,
1967 (const void *)SOC64_HANDOFF_DDR_TRAIN_DMEM_2D_SECTION);
1968 if (ret) {
1969		debug("%s: Failed to configure 2D training firmware\n",
1970		      __func__);
1971 return ret;
1972 }
1973
1974 calibrating_sdram(ddr_handoff_info);
1975
1976 ret = poll_for_training_complete(ddr_handoff_info);
1977 if (ret)
1978		debug("%s: FW training did not complete\n",
1979 __func__);
1980
1981 return ret;
1982}
1983
1984static int init_controller(struct ddr_handoff *ddr_handoff_info,
1985 u32 *user_backup, u32 *user_backup_2nd)
1986{
1987 int ret = 0;
1988
1989 if (ddr_handoff_info->cntlr_t == DDRTYPE_DDR4 ||
1990 ddr_handoff_info->cntlr_t == DDRTYPE_LPDDR4_0) {
1991 /* Initialize 1st DDR controller */
1992 ret = init_umctl2(ddr_handoff_info->cntlr_handoff_base,
1993 ddr_handoff_info->cntlr_base,
1994 ddr_handoff_info->cntlr_t,
1995 ddr_handoff_info->cntlr_handoff_length,
1996 user_backup);
1997 if (ret) {
1998			debug("%s: Failed to initialize first controller\n",
1999 __func__);
2000 return ret;
2001 }
2002 }
2003
2004 if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) {
2005 /* Initialize 2nd DDR controller */
2006 ret = init_umctl2(ddr_handoff_info->cntlr2_handoff_base,
2007 ddr_handoff_info->cntlr2_base,
2008 ddr_handoff_info->cntlr2_t,
2009 ddr_handoff_info->cntlr2_handoff_length,
2010 user_backup_2nd);
2011 if (ret)
2012			debug("%s: Failed to initialize 2nd controller\n",
2013 __func__);
2014 }
2015
2016 return ret;
2017}
2018
2019static int dfi_init(struct ddr_handoff *ddr_handoff_info)
2020{
2021 int ret;
2022
2023 ret = ddr_start_dfi_init(ddr_handoff_info->cntlr_base,
2024 ddr_handoff_info->cntlr_t);
2025 if (ret)
2026 return ret;
2027
2028 if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1)
2029 ret = ddr_start_dfi_init(ddr_handoff_info->cntlr2_base,
2030 ddr_handoff_info->cntlr2_t);
2031
2032 return ret;
2033}
2034
2035static int check_dfi_init(struct ddr_handoff *handoff)
2036{
2037 int ret;
2038
2039 ret = ddr_check_dfi_init_complete(handoff->cntlr_base,
2040 handoff->cntlr_t);
2041 if (ret)
2042 return ret;
2043
2044 if (handoff->cntlr2_t == DDRTYPE_LPDDR4_1)
2045 ret = ddr_check_dfi_init_complete(handoff->cntlr2_base,
2046 handoff->cntlr2_t);
2047
2048 return ret;
2049}
2050
2051static int trigger_sdram_init(struct ddr_handoff *handoff)
2052{
2053 int ret;
2054
2055 ret = ddr_trigger_sdram_init(handoff->cntlr_base,
2056 handoff->cntlr_t);
2057 if (ret)
2058 return ret;
2059
2060 if (handoff->cntlr2_t == DDRTYPE_LPDDR4_1)
2061 ret = ddr_trigger_sdram_init(handoff->cntlr2_base,
2062 handoff->cntlr2_t);
2063
2064 return ret;
2065}
2066
2067static int ddr_post_config(struct ddr_handoff *handoff)
2068{
2069 int ret;
2070
2071 ret = ddr_post_handoff_config(handoff->cntlr_base,
2072 handoff->cntlr_t);
2073 if (ret)
2074 return ret;
2075
2076 if (handoff->cntlr2_t == DDRTYPE_LPDDR4_1)
2077 ret = ddr_post_handoff_config(handoff->cntlr2_base,
2078 handoff->cntlr2_t);
2079
2080 return ret;
2081}
2082
2083static bool is_ddr_retention_enabled(u32 boot_scratch_cold0_reg)
2084{
2085 return boot_scratch_cold0_reg &
2086 ALT_SYSMGR_SCRATCH_REG_0_DDR_RETENTION_MASK;
2087}
2088
2089static bool is_ddr_bitstream_sha_matching(u32 boot_scratch_cold0_reg)
2090{
2091 return boot_scratch_cold0_reg & ALT_SYSMGR_SCRATCH_REG_0_DDR_SHA_MASK;
2092}
2093
2094static enum reset_type get_reset_type(u32 boot_scratch_cold0_reg)
2095{
2096 return (boot_scratch_cold0_reg &
2097 ALT_SYSMGR_SCRATCH_REG_0_DDR_RESET_TYPE_MASK) >>
2098 ALT_SYSMGR_SCRATCH_REG_0_DDR_RESET_TYPE_SHIFT;
2099}
2100
2101void reset_type_debug_print(u32 boot_scratch_cold0_reg)
2102{
2103 switch (get_reset_type(boot_scratch_cold0_reg)) {
2104 case POR_RESET:
2105 debug("%s: POR is triggered\n", __func__);
2106 break;
2107 case WARM_RESET:
2108 debug("%s: Warm reset is triggered\n", __func__);
2109 break;
2110 case COLD_RESET:
2111 debug("%s: Cold reset is triggered\n", __func__);
2112 break;
2113 default:
2114 debug("%s: Invalid reset type\n", __func__);
2115 }
2116}
2117
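/*
 * Decide whether DDR needs to be (re)initialized: always after a POR, never
 * after a warm reset, and skipped after a cold reset only when DDR retention
 * is enabled and the stored bitstream SHA still matches.
 */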
2118bool is_ddr_init(void)
2119{
2120 u32 reg = readl(socfpga_get_sysmgr_addr() +
2121 SYSMGR_SOC64_BOOT_SCRATCH_COLD0);
2122
2123 reset_type_debug_print(reg);
2124
2125 if (get_reset_type(reg) == POR_RESET) {
2126 debug("%s: DDR init is required\n", __func__);
2127 return true;
2128 }
2129
2130 if (get_reset_type(reg) == WARM_RESET) {
2131 debug("%s: DDR init is skipped\n", __func__);
2132 return false;
2133 }
2134
2135 if (get_reset_type(reg) == COLD_RESET) {
2136 if (is_ddr_retention_enabled(reg) &&
2137 is_ddr_bitstream_sha_matching(reg)) {
2138 debug("%s: DDR retention bit is set\n", __func__);
2139			debug("%s: DDR bitstream SHA matches\n", __func__);
2140 debug("%s: DDR init is skipped\n", __func__);
2141 return false;
2142 }
2143 }
2144
2145 debug("%s: DDR init is required\n", __func__);
2146 return true;
2147}
2148
2149int sdram_mmr_init_full(struct udevice *dev)
2150{
2151 u32 user_backup[2], user_backup_2nd[2];
2152 int ret;
2153 struct bd_info bd;
2154 struct ddr_handoff ddr_handoff_info;
2155 struct altera_sdram_priv *priv = dev_get_priv(dev);
2156
2157 printf("Checking SDRAM configuration in progress ...\n");
2158 ret = populate_ddr_handoff(&ddr_handoff_info);
2159 if (ret) {
2160 debug("%s: Failed to populate DDR handoff\n",
2161 __func__);
2162 return ret;
2163 }
2164
2165 /* Set the MPFE NoC mux to correct DDR controller type */
2166 use_ddr4(ddr_handoff_info.cntlr_t);
2167
2168 if (is_ddr_init()) {
2169 printf("SDRAM init in progress ...\n");
2170
2171		/*
2172		 * Poll for reset complete; it must be high to ensure the DDR
2173		 * subsystem is in a complete reset state before initializing
2174		 * the DDR clock and DDR controller
2175		 */
2176 ret = wait_for_bit_le32((const void *)((uintptr_t)(readl
2177 (ddr_handoff_info.mem_reset_base) +
2178 MEM_RST_MGR_STATUS)),
2179 MEM_RST_MGR_STATUS_RESET_COMPLETE,
2180 true, TIMEOUT_200MS, false);
2181 if (ret) {
2182 debug("%s: Timeout while waiting for", __func__);
2183			debug(" reset complete\n");
2184 return ret;
2185 }
2186
2187 ret = enable_ddr_clock(dev);
2188 if (ret)
2189 return ret;
2190
2191 ret = init_controller(&ddr_handoff_info, user_backup,
2192 user_backup_2nd);
2193 if (ret) {
2194			debug("%s: Failed to initialize DDR controller\n",
2195 __func__);
2196 return ret;
2197 }
2198
2199 /* Release the controller from reset */
2200 setbits_le32((uintptr_t)
2201 (readl(ddr_handoff_info.mem_reset_base) +
2202 MEM_RST_MGR_STATUS), MEM_RST_MGR_STATUS_AXI_RST |
2203 MEM_RST_MGR_STATUS_CONTROLLER_RST |
2204 MEM_RST_MGR_STATUS_RESET_COMPLETE);
2205
2206 printf("DDR controller configuration is completed\n");
2207
2208 /* Initialize DDR PHY */
2209 ret = init_phy(&ddr_handoff_info);
2210 if (ret) {
2211			debug("%s: Failed to initialize DDR PHY\n", __func__);
2212 return ret;
2213 }
2214
2215 enable_phy_clk_for_csr_access(&ddr_handoff_info, true);
2216
2217 ret = start_ddr_calibration(&ddr_handoff_info);
2218 if (ret) {
2219 debug("%s: Failed to calibrate DDR\n", __func__);
2220 return ret;
2221 }
2222
2223 enable_phy_clk_for_csr_access(&ddr_handoff_info, false);
2224
2225		/* Reset the ARC processor when not in use, for security purposes */
2226 setbits_le16(ddr_handoff_info.phy_base +
2227 DDR_PHY_MICRORESET_OFFSET,
2228 DDR_PHY_MICRORESET_RESET);
2229
2230 /* DDR freq set to support DDR4-3200 */
2231 phy_init_engine(&ddr_handoff_info);
2232
2233 ret = dfi_init(&ddr_handoff_info);
2234 if (ret)
2235 return ret;
2236
2237 ret = check_dfi_init(&ddr_handoff_info);
2238 if (ret)
2239 return ret;
2240
2241 ret = trigger_sdram_init(&ddr_handoff_info);
2242 if (ret)
2243 return ret;
2244
2245 ret = ddr_post_config(&ddr_handoff_info);
2246 if (ret)
2247 return ret;
2248
2249 /* Restore user settings */
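		/*
		 * PWRCTL and INIT0 were stashed in user_backup[] when the
		 * controller was initialized; put the user's power
		 * management and skip-init settings back now that
		 * calibration is done.
		 */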
2250 writel(user_backup[0], ddr_handoff_info.cntlr_base +
2251 DDR4_PWRCTL_OFFSET);
2252
2253		if (ddr_handoff_info.cntlr_t == DDRTYPE_LPDDR4_0)
2254 setbits_le32(ddr_handoff_info.cntlr_base +
2255 DDR4_INIT0_OFFSET, user_backup[1]);
2256
2257 if (ddr_handoff_info.cntlr2_t == DDRTYPE_LPDDR4_1) {
2258 /* Restore user settings */
2259 writel(user_backup_2nd[0],
2260 ddr_handoff_info.cntlr2_base +
2261 DDR4_PWRCTL_OFFSET);
2262
2263 setbits_le32(ddr_handoff_info.cntlr2_base +
2264 DDR4_INIT0_OFFSET, user_backup_2nd[1]);
2265 }
2266
2267 /* Enable input traffic per port */
2268 setbits_le32(ddr_handoff_info.cntlr_base + DDR4_PCTRL0_OFFSET,
2269 DDR4_PCTRL0_PORT_EN);
2270
2271 if (ddr_handoff_info.cntlr2_t == DDRTYPE_LPDDR4_1) {
2272 /* Enable input traffic per port */
2273 setbits_le32(ddr_handoff_info.cntlr2_base +
2274 DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN);
2275 }
2276
2277 printf("DDR init success\n");
2278 }
2279
2280 /* Get bank configuration from devicetree */
2281 ret = fdtdec_decode_ram_size(gd->fdt_blob, NULL, 0, NULL,
2282 (phys_size_t *)&gd->ram_size, &bd);
2283 if (ret) {
2284 debug("%s: Failed to decode memory node\n", __func__);
2285 return -1;
2286 }
2287
2288 printf("DDR: %lld MiB\n", gd->ram_size >> 20);
2289
2290 priv->info.base = bd.bi_dram[0].start;
2291 priv->info.size = gd->ram_size;
2292
2293 sdram_size_check(&bd);
2294
2295 sdram_set_firewall(&bd);
2296
2297 return 0;
2298}