1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2020-2022 Intel Corporation <www.intel.com>
4 *
5 */
6
7#include <clk.h>
8#include <div64.h>
9#include <dm.h>
10#include <errno.h>
11#include <fdtdec.h>
12#include <hang.h>
13#include <ram.h>
14#include <reset.h>
15#include "sdram_soc64.h"
16#include <wait_bit.h>
17#include <asm/arch/firewall.h>
18#include <asm/arch/handoff_soc64.h>
19#include <asm/arch/misc.h>
20#include <asm/arch/reset_manager.h>
21#include <asm/arch/system_manager.h>
22#include <asm/io.h>
23#include <linux/err.h>
24#include <linux/sizes.h>
25
26DECLARE_GLOBAL_DATA_PTR;
27
28/* MPFE NOC registers */
29#define FPGA2SDRAM_MGR_MAIN_SIDEBANDMGR_FLAGOUTSET0 0xF8024050
30
31/* Memory reset manager */
32#define MEM_RST_MGR_STATUS 0x8
33
34/* Register and bit in memory reset manager */
35#define MEM_RST_MGR_STATUS_RESET_COMPLETE BIT(0)
36#define MEM_RST_MGR_STATUS_PWROKIN_STATUS BIT(1)
37#define MEM_RST_MGR_STATUS_CONTROLLER_RST BIT(2)
38#define MEM_RST_MGR_STATUS_AXI_RST BIT(3)
39
40#define TIMEOUT_200MS 200
41#define TIMEOUT_5000MS 5000
42
43/* DDR4 umctl2 */
44#define DDR4_MSTR_OFFSET 0x0
45#define DDR4_FREQ_RATIO BIT(22)
46
47#define DDR4_STAT_OFFSET 0x4
48#define DDR4_STAT_SELFREF_TYPE GENMASK(5, 4)
49#define DDR4_STAT_SELFREF_TYPE_SHIFT 4
50#define DDR4_STAT_OPERATING_MODE GENMASK(2, 0)
51
52#define DDR4_MRCTRL0_OFFSET 0x10
53#define DDR4_MRCTRL0_MR_TYPE BIT(0)
54#define DDR4_MRCTRL0_MPR_EN BIT(1)
55#define DDR4_MRCTRL0_MR_RANK GENMASK(5, 4)
56#define DDR4_MRCTRL0_MR_RANK_SHIFT 4
57#define DDR4_MRCTRL0_MR_ADDR GENMASK(15, 12)
58#define DDR4_MRCTRL0_MR_ADDR_SHIFT 12
59#define DDR4_MRCTRL0_MR_WR BIT(31)
60
61#define DDR4_MRCTRL1_OFFSET 0x14
62#define DDR4_MRCTRL1_MR_DATA 0x3FFFF
63
64#define DDR4_MRSTAT_OFFSET 0x18
65#define DDR4_MRSTAT_MR_WR_BUSY BIT(0)
66
67#define DDR4_MRCTRL2_OFFSET 0x1C
68
69#define DDR4_PWRCTL_OFFSET 0x30
70#define DDR4_PWRCTL_SELFREF_EN BIT(0)
71#define DDR4_PWRCTL_POWERDOWN_EN BIT(1)
72#define DDR4_PWRCTL_EN_DFI_DRAM_CLK_DISABLE BIT(3)
73#define DDR4_PWRCTL_SELFREF_SW BIT(5)
74
75#define DDR4_PWRTMG_OFFSET 0x34
76#define DDR4_HWLPCTL_OFFSET 0x38
77#define DDR4_RFSHCTL0_OFFSET 0x50
78#define DDR4_RFSHCTL1_OFFSET 0x54
79
80#define DDR4_RFSHCTL3_OFFSET 0x60
81#define DDR4_RFSHCTL3_DIS_AUTO_REFRESH BIT(0)
82#define DDR4_RFSHCTL3_REFRESH_MODE GENMASK(6, 4)
83#define DDR4_RFSHCTL3_REFRESH_MODE_SHIFT 4
84
85#define DDR4_ECCCFG0_OFFSET 0x70
86#define DDR4_ECC_MODE GENMASK(2, 0)
87#define DDR4_DIS_SCRUB BIT(4)
88#define LPDDR4_ECCCFG0_ECC_REGION_MAP_GRANU_SHIFT 30
89#define LPDDR4_ECCCFG0_ECC_REGION_MAP_SHIFT 8
90
91#define DDR4_ECCCFG1_OFFSET 0x74
92#define LPDDR4_ECCCFG1_ECC_REGIONS_PARITY_LOCK BIT(4)
93
94#define DDR4_CRCPARCTL0_OFFSET 0xC0
95#define DDR4_CRCPARCTL0_DFI_ALERT_ERR_INIT_CLR BIT(1)
96
97#define DDR4_CRCPARCTL1_OFFSET 0xC4
98#define DDR4_CRCPARCTL1_CRC_PARITY_RETRY_ENABLE BIT(8)
99#define DDR4_CRCPARCTL1_ALERT_WAIT_FOR_SW BIT(9)
100
101#define DDR4_CRCPARSTAT_OFFSET 0xCC
102#define DDR4_CRCPARSTAT_DFI_ALERT_ERR_INT BIT(16)
103#define DDR4_CRCPARSTAT_DFI_ALERT_ERR_FATL_INT BIT(17)
104#define DDR4_CRCPARSTAT_DFI_ALERT_ERR_NO_SW BIT(19)
105#define DDR4_CRCPARSTAT_CMD_IN_ERR_WINDOW BIT(29)
106
107#define DDR4_INIT0_OFFSET 0xD0
108#define DDR4_INIT0_SKIP_RAM_INIT GENMASK(31, 30)
109
110#define DDR4_RANKCTL_OFFSET 0xF4
111#define DDR4_RANKCTL_DIFF_RANK_RD_GAP GENMASK(7, 4)
112#define DDR4_RANKCTL_DIFF_RANK_WR_GAP GENMASK(11, 8)
113#define DDR4_RANKCTL_DIFF_RANK_RD_GAP_MSB BIT(24)
114#define DDR4_RANKCTL_DIFF_RANK_WR_GAP_MSB BIT(26)
115#define DDR4_RANKCTL_DIFF_RANK_RD_GAP_SHIFT 4
116#define DDR4_RANKCTL_DIFF_RANK_WR_GAP_SHIFT 8
117#define DDR4_RANKCTL_DIFF_RANK_RD_GAP_MSB_SHIFT 24
118#define DDR4_RANKCTL_DIFF_RANK_WR_GAP_MSB_SHIFT 26
119
120#define DDR4_RANKCTL1_OFFSET 0xF8
121#define DDR4_RANKCTL1_WR2RD_DR GENMASK(5, 0)
122
123#define DDR4_DRAMTMG2_OFFSET 0x108
124#define DDR4_DRAMTMG2_WR2RD GENMASK(5, 0)
125#define DDR4_DRAMTMG2_RD2WR GENMASK(13, 8)
126#define DDR4_DRAMTMG2_RD2WR_SHIFT 8
127
128#define DDR4_DRAMTMG9_OFFSET 0x124
129#define DDR4_DRAMTMG9_W2RD_S GENMASK(5, 0)
130
131#define DDR4_DFITMG1_OFFSET 0x194
132#define DDR4_DFITMG1_DFI_T_WRDATA_DELAY GENMASK(20, 16)
133#define DDR4_DFITMG1_DFI_T_WRDATA_SHIFT 16
134
135#define DDR4_DFIMISC_OFFSET 0x1B0
136#define DDR4_DFIMISC_DFI_INIT_COMPLETE_EN BIT(0)
137#define DDR4_DFIMISC_DFI_INIT_START BIT(5)
138
139#define DDR4_DFISTAT_OFFSET 0x1BC
140#define DDR4_DFI_INIT_COMPLETE BIT(0)
141
142#define DDR4_DBG0_OFFSET 0x300
143
144#define DDR4_DBG1_OFFSET 0x304
145#define DDR4_DBG1_DISDQ BIT(0)
146#define DDR4_DBG1_DIS_HIF BIT(1)
147
148#define DDR4_DBGCAM_OFFSET 0x308
149#define DDR4_DBGCAM_DBG_RD_Q_EMPTY BIT(25)
150#define DDR4_DBGCAM_DBG_WR_Q_EMPTY BIT(26)
151#define DDR4_DBGCAM_RD_DATA_PIPELINE_EMPTY BIT(28)
152#define DDR4_DBGCAM_WR_DATA_PIPELINE_EMPTY BIT(29)
153
154#define DDR4_SWCTL_OFFSET 0x320
155#define DDR4_SWCTL_SW_DONE BIT(0)
156
157#define DDR4_SWSTAT_OFFSET 0x324
158#define DDR4_SWSTAT_SW_DONE_ACK BIT(0)
159
160#define DDR4_PSTAT_OFFSET 0x3FC
161#define DDR4_PSTAT_RD_PORT_BUSY_0 BIT(0)
162#define DDR4_PSTAT_WR_PORT_BUSY_0 BIT(16)
163
164#define DDR4_PCTRL0_OFFSET 0x490
165#define DDR4_PCTRL0_PORT_EN BIT(0)
166
167#define DDR4_SBRCTL_OFFSET 0xF24
168#define DDR4_SBRCTL_SCRUB_INTERVAL 0x1FFF00
169#define DDR4_SBRCTL_SCRUB_EN BIT(0)
170#define DDR4_SBRCTL_SCRUB_WRITE BIT(2)
171#define DDR4_SBRCTL_SCRUB_BURST_1 BIT(4)
172
173#define DDR4_SBRSTAT_OFFSET 0xF28
174#define DDR4_SBRSTAT_SCRUB_BUSY BIT(0)
175#define DDR4_SBRSTAT_SCRUB_DONE BIT(1)
176
177#define DDR4_SBRWDATA0_OFFSET 0xF2C
178#define DDR4_SBRWDATA1_OFFSET 0xF30
179#define DDR4_SBRSTART0_OFFSET 0xF38
180#define DDR4_SBRSTART1_OFFSET 0xF3C
181#define DDR4_SBRRANGE0_OFFSET 0xF40
182#define DDR4_SBRRANGE1_OFFSET 0xF44
183
184/* DDR PHY */
185#define DDR_PHY_TXODTDRVSTREN_B0_P0 0x2009A
186#define DDR_PHY_RXPBDLYTG0_R0 0x200D0
187#define DDR_PHY_DBYTE0_TXDQDLYTG0_U0_P0 0x201A0
188
189#define DDR_PHY_DBYTE0_TXDQDLYTG0_U1_P0 0x203A0
190#define DDR_PHY_DBYTE1_TXDQDLYTG0_U0_P0 0x221A0
191#define DDR_PHY_DBYTE1_TXDQDLYTG0_U1_P0 0x223A0
192#define DDR_PHY_TXDQDLYTG0_COARSE_DELAY GENMASK(9, 6)
193#define DDR_PHY_TXDQDLYTG0_COARSE_DELAY_SHIFT 6
194
195#define DDR_PHY_CALRATE_OFFSET 0x40110
196#define DDR_PHY_CALZAP_OFFSET 0x40112
197#define DDR_PHY_SEQ0BDLY0_P0_OFFSET 0x40016
198#define DDR_PHY_SEQ0BDLY1_P0_OFFSET 0x40018
199#define DDR_PHY_SEQ0BDLY2_P0_OFFSET 0x4001A
200#define DDR_PHY_SEQ0BDLY3_P0_OFFSET 0x4001C
201
202#define DDR_PHY_MEMRESETL_OFFSET 0x400C0
203#define DDR_PHY_MEMRESETL_VALUE BIT(0)
204#define DDR_PHY_PROTECT_MEMRESET BIT(1)
205
206#define DDR_PHY_CALBUSY_OFFSET 0x4012E
207#define DDR_PHY_CALBUSY BIT(0)
208
209#define DDR_PHY_TRAIN_IMEM_OFFSET 0xA0000
210#define DDR_PHY_TRAIN_DMEM_OFFSET 0xA8000
211
212#define DMEM_MB_CDD_RR_1_0_OFFSET 0xA802C
213#define DMEM_MB_CDD_RR_0_1_OFFSET 0xA8030
214#define DMEM_MB_CDD_WW_1_0_OFFSET 0xA8038
215#define DMEM_MB_CDD_WW_0_1_OFFSET 0xA803C
216#define DMEM_MB_CDD_RW_1_1_OFFSET 0xA8046
217#define DMEM_MB_CDD_RW_1_0_OFFSET 0xA8048
218#define DMEM_MB_CDD_RW_0_1_OFFSET 0xA804A
219#define DMEM_MB_CDD_RW_0_0_OFFSET 0xA804C
220
221#define DMEM_MB_CDD_CHA_RR_1_0_OFFSET 0xA8026
222#define DMEM_MB_CDD_CHA_RR_0_1_OFFSET 0xA8026
223#define DMEM_MB_CDD_CHB_RR_1_0_OFFSET 0xA8058
224#define DMEM_MB_CDD_CHB_RR_0_1_OFFSET 0xA805A
225#define DMEM_MB_CDD_CHA_WW_1_0_OFFSET 0xA8030
226#define DMEM_MB_CDD_CHA_WW_0_1_OFFSET 0xA8030
227#define DMEM_MB_CDD_CHB_WW_1_0_OFFSET 0xA8062
228#define DMEM_MB_CDD_CHB_WW_0_1_OFFSET 0xA8064
229
230#define DMEM_MB_CDD_CHA_RW_1_1_OFFSET 0xA8028
231#define DMEM_MB_CDD_CHA_RW_1_0_OFFSET 0xA8028
232#define DMEM_MB_CDD_CHA_RW_0_1_OFFSET 0xA802A
233#define DMEM_MB_CDD_CHA_RW_0_0_OFFSET 0xA802A
234
235#define DMEM_MB_CDD_CHB_RW_1_1_OFFSET 0xA805A
236#define DMEM_MB_CDD_CHB_RW_1_0_OFFSET 0xA805C
237#define DMEM_MB_CDD_CHB_RW_0_1_OFFSET 0xA805C
238#define DMEM_MB_CDD_CHB_RW_0_0_OFFSET 0xA805E
239
240#define DDR_PHY_SEQ0DISABLEFLAG0_OFFSET 0x120018
241#define DDR_PHY_SEQ0DISABLEFLAG1_OFFSET 0x12001A
242#define DDR_PHY_SEQ0DISABLEFLAG2_OFFSET 0x12001C
243#define DDR_PHY_SEQ0DISABLEFLAG3_OFFSET 0x12001E
244#define DDR_PHY_SEQ0DISABLEFLAG4_OFFSET 0x120020
245#define DDR_PHY_SEQ0DISABLEFLAG5_OFFSET 0x120022
246#define DDR_PHY_SEQ0DISABLEFLAG6_OFFSET 0x120024
247#define DDR_PHY_SEQ0DISABLEFLAG7_OFFSET 0x120026
248
249#define DDR_PHY_UCCLKHCLKENABLES_OFFSET 0x180100
250#define DDR_PHY_UCCLKHCLKENABLES_UCCLKEN BIT(0)
251#define DDR_PHY_UCCLKHCLKENABLES_HCLKEN BIT(1)
252
253#define DDR_PHY_UCTWRITEPROT_OFFSET 0x180066
254#define DDR_PHY_UCTWRITEPROT BIT(0)
255
256#define DDR_PHY_APBONLY0_OFFSET 0x1A0000
257#define DDR_PHY_MICROCONTMUXSEL BIT(0)
258
259#define DDR_PHY_UCTSHADOWREGS_OFFSET 0x1A0008
260#define DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW BIT(0)
261
262#define DDR_PHY_DCTWRITEPROT_OFFSET 0x1A0062
263#define DDR_PHY_DCTWRITEPROT BIT(0)
264
265#define DDR_PHY_UCTWRITEONLYSHADOW_OFFSET 0x1A0064
266#define DDR_PHY_UCTDATWRITEONLYSHADOW_OFFSET 0x1A0068
267
268#define DDR_PHY_MICRORESET_OFFSET 0x1A0132
269#define DDR_PHY_MICRORESET_STALL BIT(0)
270#define DDR_PHY_MICRORESET_RESET BIT(3)
271
272#define DDR_PHY_TXODTDRVSTREN_B0_P1 0x22009A
273
274/* For firmware training */
275#define HW_DBG_TRACE_CONTROL_OFFSET 0x18
276#define FW_TRAINING_COMPLETED_STAT 0x07
277#define FW_TRAINING_FAILED_STAT 0xFF
278#define FW_COMPLETION_MSG_ONLY_MODE 0xFF
279#define FW_STREAMING_MSG_ID 0x08
280#define GET_LOWHW_DATA(x) ((x) & 0xFFFF)
281#define GET_LOWB_DATA(x) ((x) & 0xFF)
282#define GET_HIGHB_DATA(x) (((x) & 0xFF00) >> 8)
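/*
 * Illustrative note (not part of the original source): the helpers above
 * slice halfwords/bytes out of a raw 32-bit mailbox word. Assuming a value
 * of 0x1234ABCD read from the PMU mailbox:
 *
 *	GET_LOWHW_DATA(0x1234ABCD) == 0xABCD
 *	GET_LOWB_DATA(0x1234ABCD)  == 0xCD
 *	GET_HIGHB_DATA(0x1234ABCD) == 0xAB
 */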
283
284/* Operating mode */
285#define OPM_INIT 0x000
286#define OPM_NORMAL 0x001
287#define OPM_PWR_D0WN 0x010
288#define OPM_SELF_SELFREF 0x011
289#define OPM_DDR4_DEEP_PWR_DOWN 0x100
290
291/* Refresh mode */
292#define FIXED_1X 0
293#define FIXED_2X BIT(0)
294#define FIXED_4X BIT(4)
295
296/* Address of mode register */
297#define MR0 0x0000
298#define MR1 0x0001
299#define MR2 0x0010
300#define MR3 0x0011
301#define MR4 0x0100
302#define MR5 0x0101
303#define MR6 0x0110
304#define MR7 0x0111
305
306/* MR rank */
307#define RANK0 0x1
308#define RANK1 0x2
309#define ALL_RANK 0x3
310
311#define MR5_BIT4 BIT(4)
312
313/* Value for ecc_region_map */
314#define ALL_PROTECTED 0x7F
315
316/* Region size for ECCCFG0.ecc_region_map */
317enum region_size {
318 ONE_EIGHT,
319 ONE_SIXTEENTH,
320 ONE_THIRTY_SECOND,
321 ONE_SIXTY_FOURTH
322};
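/*
 * Illustrative note: this granularity value is combined with the region map
 * when ECCCFG0 is programmed for LPDDR4 inline ECC (see scrubber_ddr_config()
 * below), e.g.
 *
 *	(ONE_EIGHT << LPDDR4_ECCCFG0_ECC_REGION_MAP_GRANU_SHIFT) |
 *	(ALL_PROTECTED << LPDDR4_ECCCFG0_ECC_REGION_MAP_SHIFT)
 *		== (0x0 << 30) | (0x7F << 8) == 0x00007F00
 */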
323
324enum ddr_type {
325 DDRTYPE_LPDDR4_0,
326 DDRTYPE_LPDDR4_1,
327 DDRTYPE_DDR4,
328 DDRTYPE_UNKNOWN
329};
330
331/* Reset type */
332enum reset_type {
333 POR_RESET,
334 WARM_RESET,
335 COLD_RESET
336};
337
338/* DDR handoff structure */
339struct ddr_handoff {
340 /* Memory reset manager base */
341 phys_addr_t mem_reset_base;
342
343 /* First controller attributes */
344 phys_addr_t cntlr_handoff_base;
345 phys_addr_t cntlr_base;
346 size_t cntlr_total_length;
347 enum ddr_type cntlr_t;
348 size_t cntlr_handoff_length;
349
350 /* Second controller attributes */
351 phys_addr_t cntlr2_handoff_base;
352 phys_addr_t cntlr2_base;
353 size_t cntlr2_total_length;
354 enum ddr_type cntlr2_t;
355 size_t cntlr2_handoff_length;
356
357 /* PHY attributes */
358 phys_addr_t phy_handoff_base;
359 phys_addr_t phy_base;
360 size_t phy_total_length;
361 size_t phy_handoff_length;
362
363 /* PHY engine attributes */
364 phys_addr_t phy_engine_handoff_base;
365 size_t phy_engine_total_length;
366 size_t phy_engine_handoff_length;
367
368 /* Calibration attributes */
369 phys_addr_t train_imem_base;
370 phys_addr_t train_dmem_base;
371 size_t train_imem_length;
372 size_t train_dmem_length;
373};
374
375/* Message mode */
376enum message_mode {
377 MAJOR_MESSAGE,
378 STREAMING_MESSAGE
379};
380
381static int clr_ca_parity_error_status(phys_addr_t umctl2_base)
382{
383 int ret;
384
385 debug("%s: Clear C/A parity error status in MR5[4]\n", __func__);
386
387 /* Set mode register MRS */
388 clrbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, DDR4_MRCTRL0_MPR_EN);
389
390 /* Set mode register to write operation */
391 setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, DDR4_MRCTRL0_MR_TYPE);
392
393 /* Set the address of mode register to 0x101 (MR5) */
394 setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET,
395 (MR5 << DDR4_MRCTRL0_MR_ADDR_SHIFT) &
396 DDR4_MRCTRL0_MR_ADDR);
397
398 /* Set MR rank to rank 1 */
399 setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET,
400 (RANK1 << DDR4_MRCTRL0_MR_RANK_SHIFT) &
401 DDR4_MRCTRL0_MR_RANK);
402
403 /* Clear C/A parity error status in MR5[4] */
404 clrbits_le32(umctl2_base + DDR4_MRCTRL1_OFFSET, MR5_BIT4);
405
406 /* Trigger mode register read or write operation */
407 setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, DDR4_MRCTRL0_MR_WR);
408
409 /* Wait for retry done */
410 ret = wait_for_bit_le32((const void *)(umctl2_base +
411 DDR4_MRSTAT_OFFSET), DDR4_MRSTAT_MR_WR_BUSY,
412 false, TIMEOUT_200MS, false);
413 if (ret) {
414 debug("%s: Timeout while waiting for", __func__);
415 debug(" no outstanding MR transaction\n");
416 return ret;
417 }
418
419 return 0;
420}
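/*
 * Illustrative arithmetic (not part of the original source): with the macro
 * values above, the MRCTRL0 field writes in clr_ca_parity_error_status()
 * expand to
 *
 *	(MR5 << DDR4_MRCTRL0_MR_ADDR_SHIFT) & DDR4_MRCTRL0_MR_ADDR
 *		== (0x101 << 12) & GENMASK(15, 12) == 0x1000
 *	(RANK1 << DDR4_MRCTRL0_MR_RANK_SHIFT) & DDR4_MRCTRL0_MR_RANK
 *		== (0x2 << 4) & GENMASK(5, 4) == 0x20
 *
 * before DDR4_MRCTRL0_MR_WR triggers the mode register transaction.
 */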
421
422static int ddr_retry_software_sequence(phys_addr_t umctl2_base)
423{
424 u32 value;
425 int ret;
426
427 /* Check whether software can perform MRS/MPR/PDA */
428 value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
429 DDR4_CRCPARSTAT_DFI_ALERT_ERR_NO_SW;
430
431 if (value) {
432 /* Clear interrupt bit for DFI alert error */
433 setbits_le32(umctl2_base + DDR4_CRCPARCTL0_OFFSET,
434 DDR4_CRCPARCTL0_DFI_ALERT_ERR_INIT_CLR);
435 }
436
437 debug("%s: Software can perform MRS/MPR/PDA\n", __func__);
438
439 ret = wait_for_bit_le32((const void *)(umctl2_base +
440 DDR4_MRSTAT_OFFSET),
441 DDR4_MRSTAT_MR_WR_BUSY,
442 false, TIMEOUT_200MS, false);
443 if (ret) {
444 debug("%s: Timeout while waiting for", __func__);
445 debug(" no outstanding MR transaction\n");
446 return ret;
447 }
448
449 ret = clr_ca_parity_error_status(umctl2_base);
450 if (ret)
451 return ret;
452
453 if (!value) {
454 /* Clear interrupt bit for DFI alert error */
455 setbits_le32(umctl2_base + DDR4_CRCPARCTL0_OFFSET,
456 DDR4_CRCPARCTL0_DFI_ALERT_ERR_INIT_CLR);
457 }
458
459 return 0;
460}
461
462static int ensure_retry_procedure_complete(phys_addr_t umctl2_base)
463{
464 u32 value;
465 u32 start = get_timer(0);
466 int ret;
467
468 /* Check whether the parity/CRC error window is empty */
469 value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
470 DDR4_CRCPARSTAT_CMD_IN_ERR_WINDOW;
471
472 /* Poll until the parity/CRC error window is empty */
473 while (value) {
474 if (get_timer(start) > TIMEOUT_200MS) {
475 debug("%s: Timeout while waiting for",
476 __func__);
477 debug(" parity/crc/error window empty\n");
478 return -ETIMEDOUT;
479 }
480
481 /* Check whether software intervention is enabled */
482 value = readl(umctl2_base + DDR4_CRCPARCTL1_OFFSET) &
483 DDR4_CRCPARCTL1_ALERT_WAIT_FOR_SW;
484 if (value) {
485 debug("%s: Software intervention is enabled\n",
486 __func__);
487
488 /* Check whether the DFI alert error interrupt is set */
489 value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
490 DDR4_CRCPARSTAT_DFI_ALERT_ERR_INT;
491
492 if (value) {
493 ret = ddr_retry_software_sequence(umctl2_base);
494 debug("%s: DFI alert error interrupt ",
495 __func__);
496 debug("is set\n");
497
498 if (ret)
499 return ret;
500 }
501
502 /*
503 * Check whether the fatal parity error interrupt is set
504 */
505 value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
506 DDR4_CRCPARSTAT_DFI_ALERT_ERR_FATL_INT;
507 if (value) {
508 printf("%s: Fatal parity error ",
509 __func__);
510 printf("interrupt is set, Hang it!!\n");
511 hang();
512 }
513 }
514
515 value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) &
516 DDR4_CRCPARSTAT_CMD_IN_ERR_WINDOW;
517
518 udelay(1);
519 schedule();
520 }
521
522 return 0;
523}
524
525static int enable_quasi_dynamic_reg_grp3(phys_addr_t umctl2_base,
526 enum ddr_type umctl2_type)
527{
528 u32 i, value, backup;
529 int ret = 0;
530
531 /* Disable input traffic per port */
532 clrbits_le32(umctl2_base + DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN);
533
534 /* Polling AXI port until idle */
535 ret = wait_for_bit_le32((const void *)(umctl2_base +
536 DDR4_PSTAT_OFFSET),
537 DDR4_PSTAT_WR_PORT_BUSY_0 |
538 DDR4_PSTAT_RD_PORT_BUSY_0, false,
539 TIMEOUT_200MS, false);
540 if (ret) {
541 debug("%s: Timeout while waiting for", __func__);
542 debug(" controller idle\n");
543 return ret;
544 }
545
546 /* Backup user setting */
547 backup = readl(umctl2_base + DDR4_DBG1_OFFSET);
548
549 /* Disable input traffic to the controller */
550 setbits_le32(umctl2_base + DDR4_DBG1_OFFSET, DDR4_DBG1_DIS_HIF);
551
552 /*
553 * Ensure CAM/data pipelines are empty.
554 * Poll twice until the CAM/data pipeline empty flags are set,
555 * with a 200ms timeout per poll
556 */
557 for (i = 0; i < 2; i++) {
558 ret = wait_for_bit_le32((const void *)(umctl2_base +
559 DDR4_DBGCAM_OFFSET),
560 DDR4_DBGCAM_WR_DATA_PIPELINE_EMPTY |
561 DDR4_DBGCAM_RD_DATA_PIPELINE_EMPTY |
562 DDR4_DBGCAM_DBG_WR_Q_EMPTY |
563 DDR4_DBGCAM_DBG_RD_Q_EMPTY, true,
564 TIMEOUT_200MS, false);
565 if (ret) {
566 debug("%s: loop(%u): Timeout while waiting for",
567 __func__, i + 1);
568 debug(" CAM/data pipelines are empty\n");
569
570 goto out;
571 }
572 }
573
574 if (umctl2_type == DDRTYPE_DDR4) {
575 /* Check whether DDR4 retry is enabled */
576 value = readl(umctl2_base + DDR4_CRCPARCTL1_OFFSET) &
577 DDR4_CRCPARCTL1_CRC_PARITY_RETRY_ENABLE;
578
579 if (value) {
580 debug("%s: DDR4 retry is enabled\n", __func__);
581
582 ret = ensure_retry_procedure_complete(umctl2_base);
583 if (ret) {
584 debug("%s: Timeout while waiting for",
585 __func__);
586 debug(" retry procedure complete\n");
587
588 goto out;
589 }
590 }
591 }
592
593 debug("%s: Quasi-dynamic group 3 registers are enabled\n", __func__);
594
595out:
596 /* Restore user setting */
597 writel(backup, umctl2_base + DDR4_DBG1_OFFSET);
598
599 return ret;
600}
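/*
 * Illustrative sketch (an assumption drawn from the callers below, not an
 * extra helper in this driver): every quasi-dynamic group 3 update follows
 * the same bracket around enable_quasi_dynamic_reg_grp3():
 *
 *	clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
 *	ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
 *	if (ret)
 *		return ret;
 *	... modify group 3 registers, e.g. DDR4_DFIMISC_OFFSET ...
 *	setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
 *	ret = wait_for_bit_le32((const void *)(umctl2_base +
 *				DDR4_SWSTAT_OFFSET), DDR4_SWSTAT_SW_DONE_ACK,
 *				true, TIMEOUT_200MS, false);
 */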
601
602static enum ddr_type get_ddr_type(phys_addr_t ddr_type_location)
603{
604 u32 ddr_type_magic = readl(ddr_type_location);
605
606 if (ddr_type_magic == SOC64_HANDOFF_DDR_UMCTL2_DDR4_TYPE)
607 return DDRTYPE_DDR4;
608
609 if (ddr_type_magic == SOC64_HANDOFF_DDR_UMCTL2_LPDDR4_0_TYPE)
610 return DDRTYPE_LPDDR4_0;
611
612 if (ddr_type_magic == SOC64_HANDOFF_DDR_UMCTL2_LPDDR4_1_TYPE)
613 return DDRTYPE_LPDDR4_1;
614
615 return DDRTYPE_UNKNOWN;
616}
617
618static void use_lpddr4_interleaving(bool set)
619{
620 if (set) {
621 printf("Starting LPDDR4 interleaving configuration ...\n");
622 setbits_le32(FPGA2SDRAM_MGR_MAIN_SIDEBANDMGR_FLAGOUTSET0,
623 BIT(5));
624 } else {
625 printf("Starting LPDDR4 non-interleaving configuration ...\n");
626 clrbits_le32(FPGA2SDRAM_MGR_MAIN_SIDEBANDMGR_FLAGOUTSET0,
627 BIT(5));
628 }
629}
630
631static void use_ddr4(enum ddr_type type)
632{
633 if (type == DDRTYPE_DDR4) {
634 printf("Starting DDR4 configuration ...\n");
635 setbits_le32(socfpga_get_sysmgr_addr() + SYSMGR_SOC64_DDR_MODE,
636 SYSMGR_SOC64_DDR_MODE_MSK);
637 } else if (type == DDRTYPE_LPDDR4_0) {
638 printf("Starting LPDDR4 configuration ...\n");
639 clrbits_le32(socfpga_get_sysmgr_addr() + SYSMGR_SOC64_DDR_MODE,
640 SYSMGR_SOC64_DDR_MODE_MSK);
641
642 use_lpddr4_interleaving(false);
643 }
644}
645
646static int scrubber_ddr_config(phys_addr_t umctl2_base,
647 enum ddr_type umctl2_type)
648{
649 u32 backup[9];
650 int ret;
651
652 /* Reset to default value to prevent the scrubber stopping due to low power */
653 writel(0, umctl2_base + DDR4_PWRCTL_OFFSET);
654
655 /* Backup user settings */
656 backup[0] = readl(umctl2_base + DDR4_SBRCTL_OFFSET);
657 backup[1] = readl(umctl2_base + DDR4_SBRWDATA0_OFFSET);
658 backup[2] = readl(umctl2_base + DDR4_SBRSTART0_OFFSET);
659 if (umctl2_type == DDRTYPE_DDR4) {
660 backup[3] = readl(umctl2_base + DDR4_SBRWDATA1_OFFSET);
661 backup[4] = readl(umctl2_base + DDR4_SBRSTART1_OFFSET);
662 }
663 backup[5] = readl(umctl2_base + DDR4_SBRRANGE0_OFFSET);
664 backup[6] = readl(umctl2_base + DDR4_SBRRANGE1_OFFSET);
665 backup[7] = readl(umctl2_base + DDR4_ECCCFG0_OFFSET);
666 backup[8] = readl(umctl2_base + DDR4_ECCCFG1_OFFSET);
667
668 if (umctl2_type != DDRTYPE_DDR4) {
669 /* Lock ECC region to ensure this region is not being accessed */
670 setbits_le32(umctl2_base + DDR4_ECCCFG1_OFFSET,
671 LPDDR4_ECCCFG1_ECC_REGIONS_PARITY_LOCK);
672 }
673 /* Disable input traffic per port */
674 clrbits_le32(umctl2_base + DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN);
675 /* Disable scrubber */
676 clrbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
677 /* Poll until all scrub write data has been sent */
678 ret = wait_for_bit_le32((const void *)(umctl2_base +
679 DDR4_SBRSTAT_OFFSET), DDR4_SBRSTAT_SCRUB_BUSY,
680 false, TIMEOUT_5000MS, false);
681 if (ret) {
682 debug("%s: Timeout while waiting for", __func__);
683 debug(" sending all scrub data\n");
684 return ret;
685 }
686
687 /* LPDDR4 supports inline ECC only */
688 if (umctl2_type != DDRTYPE_DDR4) {
689 /*
690 * Set all regions as protected; this is required for the
691 * scrubber to initialize the whole LPDDR4 space except the ECC region
692 */
693 writel(((ONE_EIGHT <<
694 LPDDR4_ECCCFG0_ECC_REGION_MAP_GRANU_SHIFT) |
695 (ALL_PROTECTED << LPDDR4_ECCCFG0_ECC_REGION_MAP_SHIFT)),
696 umctl2_base + DDR4_ECCCFG0_OFFSET);
697 }
698
699 /* Scrub_burst = 1, scrub_mode = 1 (performs writes) */
700 writel(DDR4_SBRCTL_SCRUB_BURST_1 | DDR4_SBRCTL_SCRUB_WRITE,
701 umctl2_base + DDR4_SBRCTL_OFFSET);
702
703 /* Zeroing whole DDR */
704 writel(0, umctl2_base + DDR4_SBRWDATA0_OFFSET);
705 writel(0, umctl2_base + DDR4_SBRSTART0_OFFSET);
706 if (umctl2_type == DDRTYPE_DDR4) {
707 writel(0, umctl2_base + DDR4_SBRWDATA1_OFFSET);
708 writel(0, umctl2_base + DDR4_SBRSTART1_OFFSET);
709 }
710 writel(0, umctl2_base + DDR4_SBRRANGE0_OFFSET);
711 writel(0, umctl2_base + DDR4_SBRRANGE1_OFFSET);
712
713 /* Enable scrubber */
714 setbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
715 /* Poll until all scrub write commands have been sent */
716 ret = wait_for_bit_le32((const void *)(umctl2_base +
717 DDR4_SBRSTAT_OFFSET), DDR4_SBRSTAT_SCRUB_DONE,
718 true, TIMEOUT_5000MS, false);
719 if (ret) {
720 debug("%s: Timeout while waiting for", __func__);
721 debug(" sending all scrub commands\n");
722 return ret;
723 }
724
725 /* Poll until all scrub write data has been sent */
726 ret = wait_for_bit_le32((const void *)(umctl2_base +
727 DDR4_SBRSTAT_OFFSET), DDR4_SBRSTAT_SCRUB_BUSY,
728 false, TIMEOUT_5000MS, false);
729 if (ret) {
730 printf("%s: Timeout while waiting for", __func__);
731 printf(" sending all scrub data\n");
732 return ret;
733 }
734
735 /* Disable scrubber */
736 clrbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
737
738 /* Restore user settings */
739 writel(backup[0], umctl2_base + DDR4_SBRCTL_OFFSET);
740 writel(backup[1], umctl2_base + DDR4_SBRWDATA0_OFFSET);
741 writel(backup[2], umctl2_base + DDR4_SBRSTART0_OFFSET);
742 if (umctl2_type == DDRTYPE_DDR4) {
743 writel(backup[3], umctl2_base + DDR4_SBRWDATA1_OFFSET);
744 writel(backup[4], umctl2_base + DDR4_SBRSTART1_OFFSET);
745 }
746 writel(backup[5], umctl2_base + DDR4_SBRRANGE0_OFFSET);
747 writel(backup[6], umctl2_base + DDR4_SBRRANGE1_OFFSET);
748 writel(backup[7], umctl2_base + DDR4_ECCCFG0_OFFSET);
749 writel(backup[8], umctl2_base + DDR4_ECCCFG1_OFFSET);
750
751 /* Re-enable the scrubber if the restored user setting performs ECC read scrubs */
752 if (!(readl(umctl2_base + DDR4_SBRCTL_OFFSET) &
753 DDR4_SBRCTL_SCRUB_WRITE)) {
754 /* Enables scrubber */
755 setbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET,
756 DDR4_SBRCTL_SCRUB_EN);
757 }
758
759 return 0;
760}
761
762static void handoff_process(struct ddr_handoff *ddr_handoff_info,
763 phys_addr_t handoff_base, size_t length,
764 phys_addr_t base)
765{
766 u32 handoff_table[length];
767 u32 i, value = 0;
768
769 /* Execute configuration handoff */
770 socfpga_handoff_read((void *)handoff_base, handoff_table, length);
771
772 for (i = 0; i < length; i = i + 2) {
773 debug("%s: wr = 0x%08x ", __func__, handoff_table[i + 1]);
774 if (ddr_handoff_info && base == ddr_handoff_info->phy_base) {
775 /*
776 * Convert PHY odd offset to an even offset that is
777 * supported by the ARM processor.
778 */
779 value = handoff_table[i] << 1;
780
781 writew(handoff_table[i + 1],
782 (uintptr_t)(value + base));
783 debug("rd = 0x%08x ",
784 readw((uintptr_t)(value + base)));
785 debug("PHY offset: 0x%08x ", handoff_table[i + 1]);
786 } else {
787 value = handoff_table[i];
788 writel(handoff_table[i + 1], (uintptr_t)(value +
789 base));
790 debug("rd = 0x%08x ",
791 readl((uintptr_t)(value + base)));
792 }
793
794 debug("Absolute addr: 0x%08llx, APB offset: 0x%08x\n",
795 value + base, value);
796 }
797}
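/*
 * Illustrative note (layout inferred from the loop above; the offsets shown
 * are hypothetical): the handoff data is consumed as { offset, value } pairs,
 * so a table such as
 *
 *	u32 handoff_table[] = { 0x30, 0x00000020, 0x60, 0x00000001 };
 *
 * produces writel(0x20, base + 0x30) and writel(0x1, base + 0x60) for a
 * controller, while PHY entries double the offset (offset << 1) and use
 * 16-bit writew() accesses instead.
 */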
798
799static int init_umctl2(phys_addr_t umctl2_handoff_base,
800 phys_addr_t umctl2_base, enum ddr_type umctl2_type,
801 size_t umctl2_handoff_length,
802 u32 *user_backup)
803{
804 int ret;
805
806 if (umctl2_type == DDRTYPE_DDR4)
807 printf("Initializing DDR4 controller ...\n");
808 else if (umctl2_type == DDRTYPE_LPDDR4_0)
809 printf("Initializing LPDDR4_0 controller ...\n");
810 else if (umctl2_type == DDRTYPE_LPDDR4_1)
811 printf("Initializing LPDDR4_1 controller ...\n");
812
813 /* Prevent controller from issuing read/write to SDRAM */
814 setbits_le32(umctl2_base + DDR4_DBG1_OFFSET, DDR4_DBG1_DISDQ);
815
816 /* Put SDRAM into self-refresh */
817 setbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, DDR4_PWRCTL_SELFREF_EN);
818
819 /* Enable quasi-dynamic programming of the controller registers */
820 clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
821
822 /* Ensure the controller is in initialization mode */
823 ret = wait_for_bit_le32((const void *)(umctl2_base + DDR4_STAT_OFFSET),
824 DDR4_STAT_OPERATING_MODE, false, TIMEOUT_200MS,
825 false);
826 if (ret) {
827 debug("%s: Timeout while waiting for", __func__);
828 debug(" init operating mode\n");
829 return ret;
830 }
831
832 debug("%s: UMCTL2 handoff base address = 0x%p table length = 0x%08x\n",
833 __func__, (u32 *)umctl2_handoff_base,
834 (u32)umctl2_handoff_length);
835
836 handoff_process(NULL, umctl2_handoff_base, umctl2_handoff_length,
837 umctl2_base);
838
839 /* Backup user settings, restore after DDR up running */
840 *user_backup = readl(umctl2_base + DDR4_PWRCTL_OFFSET);
841
842 /* Disable self-refresh */
843 clrbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, DDR4_PWRCTL_SELFREF_EN);
844
845 if (umctl2_type == DDRTYPE_LPDDR4_0 ||
846 umctl2_type == DDRTYPE_LPDDR4_1) {
847 /* Setting selfref_sw to 1, based on lpddr4 requirement */
848 setbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET,
849 DDR4_PWRCTL_SELFREF_SW);
850
851 /* Backup user settings, restore after DDR up running */
852 user_backup++;
853 *user_backup = readl(umctl2_base + DDR4_INIT0_OFFSET) &
854 DDR4_INIT0_SKIP_RAM_INIT;
855
856 /*
857 * Setting INIT0.skip_dram_init to 0x3, based on lpddr4
858 * requirement
859 */
860 setbits_le32(umctl2_base + DDR4_INIT0_OFFSET,
861 DDR4_INIT0_SKIP_RAM_INIT);
862 }
863
864 /* Complete quasi-dynamic register programming */
865 setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
866
867 /* Allow the controller to issue read/write to SDRAM */
868 clrbits_le32(umctl2_base + DDR4_DBG1_OFFSET, DDR4_DBG1_DISDQ);
869
870 return 0;
871}
872
873static int phy_pre_handoff_config(phys_addr_t umctl2_base,
874 enum ddr_type umctl2_type)
875{
876 int ret;
877 u32 value;
878
879 if (umctl2_type == DDRTYPE_DDR4) {
880 /* Check whether DDR4 retry is enabled */
881 value = readl(umctl2_base + DDR4_CRCPARCTL1_OFFSET) &
882 DDR4_CRCPARCTL1_CRC_PARITY_RETRY_ENABLE;
883
884 if (value) {
885 debug("%s: DDR4 retry is enabled\n", __func__);
886 debug("%s: Disabling auto refresh is not supported\n",
887 __func__);
888 } else {
889 /* Disable auto refresh */
890 setbits_le32(umctl2_base + DDR4_RFSHCTL3_OFFSET,
891 DDR4_RFSHCTL3_DIS_AUTO_REFRESH);
892 }
893 }
894
895 /* Disable selfref_en & powerdown_en, never disable dfi dram clk */
896 clrbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET,
897 DDR4_PWRCTL_EN_DFI_DRAM_CLK_DISABLE |
898 DDR4_PWRCTL_POWERDOWN_EN | DDR4_PWRCTL_SELFREF_EN);
899
900 /* Enable quasi-dynamic programming of the controller registers */
901 clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
902
903 ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
904 if (ret)
905 return ret;
906
907 /* Masking dfi init complete */
908 clrbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET,
909 DDR4_DFIMISC_DFI_INIT_COMPLETE_EN);
910
911 /* Complete quasi-dynamic register programming */
912 setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
913
914 /* Polling programming done */
915 ret = wait_for_bit_le32((const void *)(umctl2_base +
916 DDR4_SWSTAT_OFFSET), DDR4_SWSTAT_SW_DONE_ACK,
917 true, TIMEOUT_200MS, false);
918 if (ret) {
919 debug("%s: Timeout while waiting for", __func__);
920 debug(" programming done\n");
921 }
922
923 return ret;
924}
925
926static int init_phy(struct ddr_handoff *ddr_handoff_info)
927{
928 int ret;
929
930 printf("Initializing DDR PHY ...\n");
931
932 if (ddr_handoff_info->cntlr_t == DDRTYPE_DDR4 ||
933 ddr_handoff_info->cntlr_t == DDRTYPE_LPDDR4_0) {
934 ret = phy_pre_handoff_config(ddr_handoff_info->cntlr_base,
935 ddr_handoff_info->cntlr_t);
936 if (ret)
937 return ret;
938 }
939
940 if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) {
941 ret = phy_pre_handoff_config
942 (ddr_handoff_info->cntlr2_base,
943 ddr_handoff_info->cntlr2_t);
944 if (ret)
945 return ret;
946 }
947
948 /* Execute PHY configuration handoff */
949 handoff_process(ddr_handoff_info, ddr_handoff_info->phy_handoff_base,
950 ddr_handoff_info->phy_handoff_length,
951 ddr_handoff_info->phy_base);
952
953 printf("DDR PHY configuration is completed\n");
954
955 return 0;
956}
957
958static void phy_init_engine(struct ddr_handoff *handoff)
959{
960 printf("Load PHY Init Engine ...\n");
961
962 /* Execute PIE production code handoff */
963 handoff_process(handoff, handoff->phy_engine_handoff_base,
964 handoff->phy_engine_handoff_length, handoff->phy_base);
965
966 printf("End of loading PHY Init Engine\n");
967}
968
969int populate_ddr_handoff(struct ddr_handoff *handoff)
970{
971 phys_addr_t next_section_header;
972
973 /* DDR handoff */
974 handoff->mem_reset_base = SOC64_HANDOFF_DDR_MEMRESET_BASE;
975 debug("%s: DDR memory reset base = 0x%x\n", __func__,
976 (u32)handoff->mem_reset_base);
977 debug("%s: DDR memory reset address = 0x%x\n", __func__,
978 readl(handoff->mem_reset_base));
979
980 /* Beginning of DDR controller handoff */
981 handoff->cntlr_handoff_base = SOC64_HANDOFF_DDR_UMCTL2_SECTION;
982 debug("%s: cntlr handoff base = 0x%x\n", __func__,
983 (u32)handoff->cntlr_handoff_base);
984
985 /* Get 1st DDR type */
986 handoff->cntlr_t = get_ddr_type(handoff->cntlr_handoff_base +
987 SOC64_HANDOFF_DDR_UMCTL2_TYPE_OFFSET);
988 if (handoff->cntlr_t == DDRTYPE_LPDDR4_1 ||
989 handoff->cntlr_t == DDRTYPE_UNKNOWN) {
990 debug("%s: Wrong DDR handoff format, the 1st DDR ", __func__);
991 debug("type must be DDR4 or LPDDR4_0\n");
992 return -ENOEXEC;
993 }
994
995 /* 1st cntlr base physical address */
996 handoff->cntlr_base = readl(handoff->cntlr_handoff_base +
997 SOC64_HANDOFF_DDR_UMCTL2_BASE_ADDR_OFFSET);
998 debug("%s: cntlr base = 0x%x\n", __func__, (u32)handoff->cntlr_base);
999
1000 /* Get the total length of DDR cntlr handoff section */
1001 handoff->cntlr_total_length = readl(handoff->cntlr_handoff_base +
1002 SOC64_HANDOFF_OFFSET_LENGTH);
1003 debug("%s: Umctl2 total length in byte = 0x%x\n", __func__,
1004 (u32)handoff->cntlr_total_length);
1005
1006 /* Get the length of user setting data in DDR cntlr handoff section */
1007 handoff->cntlr_handoff_length = socfpga_get_handoff_size((void *)
1008 handoff->cntlr_handoff_base);
1009 debug("%s: Umctl2 handoff length in word(32-bit) = 0x%x\n", __func__,
1010 (u32)handoff->cntlr_handoff_length);
1011
1012 /* Wrong format on user setting data */
1013 if (handoff->cntlr_handoff_length < 0) {
1014 debug("%s: Wrong format on user setting data\n", __func__);
1015 return -ENOEXEC;
1016 }
1017
1018 /* Get the next handoff section address */
1019 next_section_header = handoff->cntlr_handoff_base +
1020 handoff->cntlr_total_length;
1021 debug("%s: Next handoff section header location = 0x%llx\n", __func__,
1022 next_section_header);
1023
1024 /*
1025 * Checking next section handoff is cntlr or PHY, and changing
1026 * subsequent implementation accordingly
1027 */
1028 if (readl(next_section_header) == SOC64_HANDOFF_DDR_UMCTL2_MAGIC) {
1029 /* Get the next cntlr handoff section address */
1030 handoff->cntlr2_handoff_base = next_section_header;
1031 debug("%s: umctl2 2nd handoff base = 0x%x\n", __func__,
1032 (u32)handoff->cntlr2_handoff_base);
1033
1034 /* Get 2nd DDR type */
1035 handoff->cntlr2_t = get_ddr_type(handoff->cntlr2_handoff_base +
1036 SOC64_HANDOFF_DDR_UMCTL2_TYPE_OFFSET);
1037 if (handoff->cntlr2_t == DDRTYPE_LPDDR4_0 ||
1038 handoff->cntlr2_t == DDRTYPE_UNKNOWN) {
1039 debug("%s: Wrong DDR handoff format, the 2nd DDR ",
1040 __func__);
1041 debug("type must be LPDDR4_1\n");
1042 return -ENOEXEC;
1043 }
1044
1045 /* 2nd umctl2 base physical address */
1046 handoff->cntlr2_base =
1047 readl(handoff->cntlr2_handoff_base +
1048 SOC64_HANDOFF_DDR_UMCTL2_BASE_ADDR_OFFSET);
1049 debug("%s: cntlr2 base = 0x%x\n", __func__,
1050 (u32)handoff->cntlr2_base);
1051
1052 /* Get the total length of 2nd DDR umctl2 handoff section */
1053 handoff->cntlr2_total_length =
1054 readl(handoff->cntlr2_handoff_base +
1055 SOC64_HANDOFF_OFFSET_LENGTH);
1056 debug("%s: Umctl2_2nd total length in byte = 0x%x\n", __func__,
1057 (u32)handoff->cntlr2_total_length);
1058
1059 /*
1060 * Get the length of user setting data in DDR umctl2 handoff
1061 * section
1062 */
1063 handoff->cntlr2_handoff_length =
1064 socfpga_get_handoff_size((void *)
1065 handoff->cntlr2_handoff_base);
1066 debug("%s: cntlr2 handoff length in word(32-bit) = 0x%x\n",
1067 __func__,
1068 (u32)handoff->cntlr2_handoff_length);
1069
1070 /* Wrong format on user setting data */
1071 if (handoff->cntlr2_handoff_length < 0) {
1072 debug("%s: Wrong format on umctl2 user setting data\n",
1073 __func__);
1074 return -ENOEXEC;
1075 }
1076
1077 /* Get the next handoff section address */
1078 next_section_header = handoff->cntlr2_handoff_base +
1079 handoff->cntlr2_total_length;
1080 debug("%s: Next handoff section header location = 0x%llx\n",
1081 __func__, next_section_header);
1082 }
1083
1084 /* Checking next section handoff is PHY ? */
1085 if (readl(next_section_header) == SOC64_HANDOFF_DDR_PHY_MAGIC) {
1086 /* DDR PHY handoff */
1087 handoff->phy_handoff_base = next_section_header;
1088 debug("%s: PHY handoff base = 0x%x\n", __func__,
1089 (u32)handoff->phy_handoff_base);
1090
1091 /* PHY base physical address */
1092 handoff->phy_base = readl(handoff->phy_handoff_base +
1093 SOC64_HANDOFF_DDR_PHY_BASE_OFFSET);
1094 debug("%s: PHY base = 0x%x\n", __func__,
1095 (u32)handoff->phy_base);
1096
1097 /* Get the total length of PHY handoff section */
1098 handoff->phy_total_length = readl(handoff->phy_handoff_base +
1099 SOC64_HANDOFF_OFFSET_LENGTH);
1100 debug("%s: PHY total length in byte = 0x%x\n", __func__,
1101 (u32)handoff->phy_total_length);
1102
1103 /*
1104 * Get the length of user setting data in DDR PHY handoff
1105 * section
1106 */
1107 handoff->phy_handoff_length = socfpga_get_handoff_size((void *)
1108 handoff->phy_handoff_base);
1109 debug("%s: PHY handoff length in word(32-bit) = 0x%x\n",
1110 __func__, (u32)handoff->phy_handoff_length);
1111
1112 /* Wrong format on PHY user setting data */
1113 if (handoff->phy_handoff_length < 0) {
1114 debug("%s: Wrong format on PHY user setting data\n",
1115 __func__);
1116 return -ENOEXEC;
1117 }
1118
1119 /* Get the next handoff section address */
1120 next_section_header = handoff->phy_handoff_base +
1121 handoff->phy_total_length;
1122 debug("%s: Next handoff section header location = 0x%llx\n",
1123 __func__, next_section_header);
1124 } else {
1125 debug("%s: Wrong format for DDR handoff, expect PHY",
1126 __func__);
1127 debug(" handoff section after umctl2 handoff section\n");
1128 return -ENOEXEC;
1129 }
1130
1131 /* Checking next section handoff is PHY init Engine ? */
1132 if (readl(next_section_header) ==
1133 SOC64_HANDOFF_DDR_PHY_INIT_ENGINE_MAGIC) {
1134 /* DDR PHY Engine handoff */
1135 handoff->phy_engine_handoff_base = next_section_header;
1136 debug("%s: PHY init engine handoff base = 0x%x\n", __func__,
1137 (u32)handoff->phy_engine_handoff_base);
1138
1139 /* Get the total length of PHY init engine handoff section */
1140 handoff->phy_engine_total_length =
1141 readl(handoff->phy_engine_handoff_base +
1142 SOC64_HANDOFF_OFFSET_LENGTH);
1143 debug("%s: PHY engine total length in byte = 0x%x\n", __func__,
1144 (u32)handoff->phy_engine_total_length);
1145
1146 /*
1147 * Get the length of user setting data in DDR PHY init engine
1148 * handoff section
1149 */
1150 handoff->phy_engine_handoff_length =
1151 socfpga_get_handoff_size((void *)
1152 handoff->phy_engine_handoff_base);
1153 debug("%s: PHY engine handoff length in word(32-bit) = 0x%x\n",
1154 __func__, (u32)handoff->phy_engine_handoff_length);
1155
1156 /* Wrong format on PHY init engine setting data */
1157 if (handoff->phy_engine_handoff_length < 0) {
1158 debug("%s: Wrong format on PHY init engine ",
1159 __func__);
1160 debug("user setting data\n");
1161 return -ENOEXEC;
1162 }
1163 } else {
1164 debug("%s: Wrong format for DDR handoff, expect PHY",
1165 __func__);
1166 debug(" init engine handoff section after PHY handoff\n");
1167 debug(" section\n");
1168 return -ENOEXEC;
1169 }
1170
1171 handoff->train_imem_base = handoff->phy_base +
1172 DDR_PHY_TRAIN_IMEM_OFFSET;
1173 debug("%s: PHY train IMEM base = 0x%x\n",
1174 __func__, (u32)handoff->train_imem_base);
1175
1176 handoff->train_dmem_base = handoff->phy_base +
1177 DDR_PHY_TRAIN_DMEM_OFFSET;
1178 debug("%s: PHY train DMEM base = 0x%x\n",
1179 __func__, (u32)handoff->train_dmem_base);
1180
1181 handoff->train_imem_length = SOC64_HANDOFF_DDR_TRAIN_IMEM_LENGTH;
1182 debug("%s: PHY train IMEM length = 0x%x\n",
1183 __func__, (u32)handoff->train_imem_length);
1184
1185 handoff->train_dmem_length = SOC64_HANDOFF_DDR_TRAIN_DMEM_LENGTH;
1186 debug("%s: PHY train DMEM length = 0x%x\n",
1187 __func__, (u32)handoff->train_dmem_length);
1188
1189 return 0;
1190}
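/*
 * Illustrative summary (derived from the parsing order above): the handoff
 * blob starting at SOC64_HANDOFF_DDR_UMCTL2_SECTION is walked section by
 * section, each section carrying a magic/type word and a total length:
 *
 *	first controller  (DDR4 or LPDDR4_0)
 *	second controller (LPDDR4_1, only if SOC64_HANDOFF_DDR_UMCTL2_MAGIC
 *			   is found at the next header)
 *	PHY settings      (SOC64_HANDOFF_DDR_PHY_MAGIC)
 *	PHY init engine   (SOC64_HANDOFF_DDR_PHY_INIT_ENGINE_MAGIC)
 *
 * and the next section always begins at section_base + total_length.
 */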
1191
1192int enable_ddr_clock(struct udevice *dev)
1193{
1194 struct clk *ddr_clk;
1195 int ret;
1196
1197 /* Enable clock before init DDR */
1198 ddr_clk = devm_clk_get(dev, "mem_clk");
1199 if (!IS_ERR(ddr_clk)) {
1200 ret = clk_enable(ddr_clk);
1201 if (ret) {
1202 printf("%s: Failed to enable DDR clock\n", __func__);
1203 return ret;
1204 }
1205 } else {
1206 ret = PTR_ERR(ddr_clk);
1207 debug("%s: Failed to get DDR clock from dts\n", __func__);
1208 return ret;
1209 }
1210
1211 printf("%s: DDR clock is enabled\n", __func__);
1212
1213 return 0;
1214}
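/*
 * Illustrative note (the "mem_clk" name comes from the lookup above; the
 * snippet is a hypothetical sketch, not copied from a real dts): the
 * controller's device tree node is expected to provide something like
 *
 *	clocks = <&clkmgr MEM_CLK>;	// hypothetical phandle/index
 *	clock-names = "mem_clk";
 *
 * so that devm_clk_get(dev, "mem_clk") succeeds before DDR init starts.
 */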
1215
1216static int ddr_start_dfi_init(phys_addr_t umctl2_base,
1217 enum ddr_type umctl2_type)
1218{
1219 int ret;
1220
1221 debug("%s: Start DFI init\n", __func__);
1222
1223 /* Enable quasi-dynamic programming of controller registers */
1224 clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1225
1226 ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
1227 if (ret)
1228 return ret;
1229
1230 /* Start DFI init sequence */
1231 setbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET,
1232 DDR4_DFIMISC_DFI_INIT_START);
1233
1234 /* Complete quasi-dynamic register programming */
1235 setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1236
1237 /* Polling programming done */
1238 ret = wait_for_bit_le32((const void *)(umctl2_base +
1239 DDR4_SWSTAT_OFFSET),
1240 DDR4_SWSTAT_SW_DONE_ACK, true,
1241 TIMEOUT_200MS, false);
1242 if (ret) {
1243 debug("%s: Timeout while waiting for", __func__);
1244 debug(" programming done\n");
1245 }
1246
1247 return ret;
1248}
1249
1250static int ddr_check_dfi_init_complete(phys_addr_t umctl2_base,
1251 enum ddr_type umctl2_type)
1252{
1253 int ret;
1254
1255 /* Polling DFI init complete */
1256 ret = wait_for_bit_le32((const void *)(umctl2_base +
1257 DDR4_DFISTAT_OFFSET),
1258 DDR4_DFI_INIT_COMPLETE, true,
1259 TIMEOUT_200MS, false);
1260 if (ret) {
1261 debug("%s: Timeout while waiting for", __func__);
1262 debug(" DFI init done\n");
1263 return ret;
1264 }
1265
1266 debug("%s: DFI init completed.\n", __func__);
1267
1268 /* Enable quasi-dynamic programming of controller registers */
1269 clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1270
1271 ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
1272 if (ret)
1273 return ret;
1274
1275 /* Stop DFI init sequence */
1276 clrbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET,
1277 DDR4_DFIMISC_DFI_INIT_START);
1278
1279 /* Complete quasi-dynamic register programming */
1280 setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1281
1282 /* Polling programming done */
1283 ret = wait_for_bit_le32((const void *)(umctl2_base +
1284 DDR4_SWSTAT_OFFSET),
1285 DDR4_SWSTAT_SW_DONE_ACK, true,
1286 TIMEOUT_200MS, false);
1287 if (ret) {
1288 debug("%s: Timeout while waiting for", __func__);
1289 debug(" programming done\n");
1290 return ret;
1291 }
1292
1293 debug("%s:DDR programming done\n", __func__);
1294
1295 return ret;
1296}
1297
1298static int ddr_trigger_sdram_init(phys_addr_t umctl2_base,
1299 enum ddr_type umctl2_type)
1300{
1301 int ret;
1302
1303 /* Enable quasi-dynamic programming of controller registers */
1304 clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1305
1306 ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
1307 if (ret)
1308 return ret;
1309
1310 /* Unmasking dfi init complete */
1311 setbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET,
1312 DDR4_DFIMISC_DFI_INIT_COMPLETE_EN);
1313
1314 /* Software exit from self-refresh */
1315 clrbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, DDR4_PWRCTL_SELFREF_SW);
1316
1317 /* Complete quasi-dynamic register programming */
1318 setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1319
1320 /* Polling programming done */
1321 ret = wait_for_bit_le32((const void *)(umctl2_base +
1322 DDR4_SWSTAT_OFFSET),
1323 DDR4_SWSTAT_SW_DONE_ACK, true,
1324 TIMEOUT_200MS, false);
1325 if (ret) {
1326 debug("%s: Timeout while waiting for", __func__);
1327 debug(" programming done\n");
1328 return ret;
1329 }
1330
1331 debug("%s:DDR programming done\n", __func__);
1332 return ret;
1333}
1334
1335static int ddr_post_handoff_config(phys_addr_t umctl2_base,
1336 enum ddr_type umctl2_type)
1337{
1338 int ret = 0;
1339 u32 value;
1340 u32 start = get_timer(0);
1341
1342 do {
1343 if (get_timer(start) > TIMEOUT_200MS) {
1344 debug("%s: Timeout while waiting for",
1345 __func__);
1346 debug(" DDR enters normal operating mode\n");
1347 return -ETIMEDOUT;
1348 }
1349
1350 udelay(1);
1351 schedule();
1352
1353 /* Polling until SDRAM entered normal operating mode */
1354 value = readl(umctl2_base + DDR4_STAT_OFFSET) &
1355 DDR4_STAT_OPERATING_MODE;
1356 } while (value != OPM_NORMAL);
1357
1358 printf("DDR entered normal operating mode\n");
1359
1360 /* Enabling auto refresh */
1361 clrbits_le32(umctl2_base + DDR4_RFSHCTL3_OFFSET,
1362 DDR4_RFSHCTL3_DIS_AUTO_REFRESH);
1363
1364 /* Check whether ECC is enabled */
1365 value = readl(umctl2_base + DDR4_ECCCFG0_OFFSET) & DDR4_ECC_MODE;
1366 if (value) {
1367 printf("ECC is enabled\n");
1368 ret = scrubber_ddr_config(umctl2_base, umctl2_type);
1369 if (ret)
1370 printf("Failed to enable ECC\n");
1371 }
1372
1373 return ret;
1374}
1375
1376static int configure_training_firmware(struct ddr_handoff *ddr_handoff_info,
1377 const void *train_imem,
1378 const void *train_dmem)
1379{
1380 int ret = 0;
1381
1382 printf("Configuring training firmware ...\n");
1383
1384 /* Reset SDRAM */
1385 writew(DDR_PHY_PROTECT_MEMRESET,
1386 (uintptr_t)(ddr_handoff_info->phy_base +
1387 DDR_PHY_MEMRESETL_OFFSET));
1388
1389 /* Enable access to the PHY configuration registers */
1390 clrbits_le16(ddr_handoff_info->phy_base + DDR_PHY_APBONLY0_OFFSET,
1391 DDR_PHY_MICROCONTMUXSEL);
1392
1393 /* Copy train IMEM bin */
1394 memcpy((void *)ddr_handoff_info->train_imem_base, train_imem,
1395 ddr_handoff_info->train_imem_length);
1396
1397 ret = memcmp((void *)ddr_handoff_info->train_imem_base, train_imem,
1398 ddr_handoff_info->train_imem_length);
1399 if (ret) {
1400 debug("%s: Failed to copy train IMEM binary\n", __func__);
1401 /* Isolate the APB access from internal CSRs */
1402 setbits_le16(ddr_handoff_info->phy_base +
1403 DDR_PHY_APBONLY0_OFFSET, DDR_PHY_MICROCONTMUXSEL);
1404 return ret;
1405 }
1406
1407 memcpy((void *)ddr_handoff_info->train_dmem_base, train_dmem,
1408 ddr_handoff_info->train_dmem_length);
1409
1410 ret = memcmp((void *)ddr_handoff_info->train_dmem_base, train_dmem,
1411 ddr_handoff_info->train_dmem_length);
1412 if (ret)
1413 debug("%s: Failed to copy train DMEM binary\n", __func__);
1414
1415 /* Isolate the APB access from internal CSRs */
1416 setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_APBONLY0_OFFSET,
1417 DDR_PHY_MICROCONTMUXSEL);
1418
1419 return ret;
1420}
1421
1422static void calibrating_sdram(struct ddr_handoff *ddr_handoff_info)
1423{
1424 /* Init mailbox protocol - set 1 to DCTWRITEPROT[0] */
1425 setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
1426 DDR_PHY_DCTWRITEPROT);
1427
1428 /* Init mailbox protocol - set 1 to UCTWRITEPROT[0] */
1429 setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_UCTWRITEPROT_OFFSET,
1430 DDR_PHY_UCTWRITEPROT);
1431
1432 /* Reset and stall the ARC processor */
1433 setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_MICRORESET_OFFSET,
1434 DDR_PHY_MICRORESET_RESET | DDR_PHY_MICRORESET_STALL);
1435
1436 /* Release ARC processor */
1437 clrbits_le16(ddr_handoff_info->phy_base + DDR_PHY_MICRORESET_OFFSET,
1438 DDR_PHY_MICRORESET_RESET);
1439
1440 /* Start PHY firmware execution */
1441 clrbits_le16(ddr_handoff_info->phy_base + DDR_PHY_MICRORESET_OFFSET,
1442 DDR_PHY_MICRORESET_STALL);
1443}
1444
1445static int get_mail(struct ddr_handoff *handoff, enum message_mode mode,
1446 u32 *message_id)
1447{
1448 int ret;
1449
1450 /* Polling major messages from PMU */
1451 ret = wait_for_bit_le16((const void *)(handoff->phy_base +
1452 DDR_PHY_UCTSHADOWREGS_OFFSET),
1453 DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW,
1454 false, TIMEOUT_200MS, false);
1455 if (ret) {
1456 debug("%s: Timeout while waiting for",
1457 __func__);
1458 debug(" major messages from PMU\n");
1459 return ret;
1460 }
1461
1462 *message_id = readw((uintptr_t)(handoff->phy_base +
1463 DDR_PHY_UCTWRITEONLYSHADOW_OFFSET));
1464
1465 if (mode == STREAMING_MESSAGE)
1466 *message_id |= readw((uintptr_t)((handoff->phy_base +
1467 DDR_PHY_UCTDATWRITEONLYSHADOW_OFFSET))) <<
1468 SZ_16;
1469
1470 /* Ack the receipt of the major message */
1471 clrbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
1472 DDR_PHY_DCTWRITEPROT);
1473
1474 ret = wait_for_bit_le16((const void *)(handoff->phy_base +
1475 DDR_PHY_UCTSHADOWREGS_OFFSET),
1476 DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW,
1477 true, TIMEOUT_200MS, false);
1478 if (ret) {
1479 debug("%s: Timeout while waiting for",
1480 __func__);
1481 debug(" ack the receipt of the major message completed\n");
1482 return ret;
1483 }
1484
1485 /* Complete protocol */
1486 setbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
1487 DDR_PHY_DCTWRITEPROT);
1488
1489 return ret;
1490}
1491
1492static int get_mail_streaming(struct ddr_handoff *handoff,
1493 enum message_mode mode, u32 *index)
1494{
1495 int ret;
1496
1497 *index = readw((uintptr_t)(handoff->phy_base +
1498 DDR_PHY_UCTWRITEONLYSHADOW_OFFSET));
1499
1500 if (mode == STREAMING_MESSAGE)
1501 *index |= readw((uintptr_t)((handoff->phy_base +
1502 DDR_PHY_UCTDATWRITEONLYSHADOW_OFFSET))) <<
1503 SZ_16;
1504
1505 /* Ack the receipt of the major message */
1506 clrbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
1507 DDR_PHY_DCTWRITEPROT);
1508
1509 ret = wait_for_bit_le16((const void *)(handoff->phy_base +
1510 DDR_PHY_UCTSHADOWREGS_OFFSET),
1511 DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW,
1512 true, TIMEOUT_200MS, false);
1513 if (ret) {
1514 debug("%s: Timeout while waiting for",
1515 __func__);
1516 debug(" ack the receipt of the major message completed\n");
1517 return ret;
1518 }
1519
1520 /* Complete protocol */
1521 setbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET,
1522 DDR_PHY_DCTWRITEPROT);
1523
1524 return 0;
1525}
1526
1527static int decode_streaming_message(struct ddr_handoff *ddr_handoff_info,
1528 u32 *streaming_index)
1529{
1530 int i = 0, ret;
1531 u32 temp;
1532
1533 temp = *streaming_index;
1534
1535 while (i < GET_LOWHW_DATA(temp)) {
1536 ret = get_mail(ddr_handoff_info, STREAMING_MESSAGE,
1537 streaming_index);
1538 if (ret)
1539 return ret;
1540
1541 printf("args[%d]: 0x%x ", i, *streaming_index);
1542 i++;
1543 }
1544
1545 return 0;
1546}
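/*
 * Illustrative note (inferred from the loop above): for a streaming message
 * the low halfword of the first mailbox word is the argument count, e.g.
 * *streaming_index == 0x00080002 means GET_LOWHW_DATA() == 2, so two further
 * get_mail() reads follow to print args[0] and args[1].
 */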
1547
1548static int poll_for_training_complete(struct ddr_handoff *ddr_handoff_info)
1549{
1550 int ret;
1551 u32 message_id = 0;
1552 u32 streaming_index = 0;
1553
1554 do {
1555 ret = get_mail(ddr_handoff_info, MAJOR_MESSAGE, &message_id);
1556 if (ret)
1557 return ret;
1558
1559 printf("Major message id = 0x%x\n", message_id);
1560
1561 if (message_id == FW_STREAMING_MSG_ID) {
1562 ret = get_mail_streaming(ddr_handoff_info,
1563 STREAMING_MESSAGE,
1564 &streaming_index);
1565 if (ret)
1566 return ret;
1567
1568 printf("streaming index 0x%x : ", streaming_index);
1569
1570 decode_streaming_message(ddr_handoff_info,
1571 &streaming_index);
1572
1573 printf("\n");
1574 }
1575 } while ((message_id != FW_TRAINING_COMPLETED_STAT) &&
1576 (message_id != FW_TRAINING_FAILED_STAT));
1577
1578 if (message_id == FW_TRAINING_COMPLETED_STAT) {
1579 printf("DDR firmware training completed\n");
1580 } else if (message_id == FW_TRAINING_FAILED_STAT) {
1581 printf("DDR firmware training failed\n");
1582 hang();
1583 }
1584
1585 return 0;
1586}
1587
1588static void enable_phy_clk_for_csr_access(struct ddr_handoff *handoff,
1589 bool enable)
1590{
1591 if (enable) {
1592 /* Enable PHY clk */
1593 setbits_le16((uintptr_t)(handoff->phy_base +
1594 DDR_PHY_UCCLKHCLKENABLES_OFFSET),
1595 DDR_PHY_UCCLKHCLKENABLES_UCCLKEN |
1596 DDR_PHY_UCCLKHCLKENABLES_HCLKEN);
1597 } else {
1598 /* Disable PHY clk */
1599 clrbits_le16((uintptr_t)(handoff->phy_base +
1600 DDR_PHY_UCCLKHCLKENABLES_OFFSET),
1601 DDR_PHY_UCCLKHCLKENABLES_UCCLKEN |
1602 DDR_PHY_UCCLKHCLKENABLES_HCLKEN);
1603 }
1604}
1605
1606/* helper function for updating train result to umctl2 RANKCTL register */
1607static void set_cal_res_to_rankctrl(u32 reg_addr, u16 update_value,
1608 u32 mask, u32 msb_mask, u32 shift)
1609{
1610 u32 reg, value;
1611
1612 reg = readl((uintptr_t)reg_addr);
1613
1614 debug("max value divided by 2 is 0x%x\n", update_value);
1615 debug("umctl2 register 0x%x value is 0x%x before ", reg_addr, reg);
1616 debug("update with train result\n");
1617
1618 value = (reg & mask) >> shift;
1619
1620 value += update_value + 3;
1621
1622 /* If the value is greater than 0xF, set the corresponding *_gap_msb bit */
1623 if (value > 0xF)
1624 setbits_le32((u32 *)(uintptr_t)reg_addr, msb_mask);
1625 else
1626 clrbits_le32((u32 *)(uintptr_t)reg_addr, msb_mask);
1627
1628 reg = readl((uintptr_t)reg_addr);
1629
1630 value = (value << shift) & mask;
1631
1632 /* update register */
1633 writel((reg & (~mask)) | value, (uintptr_t)reg_addr);
1634
1635 reg = readl((uintptr_t)reg_addr);
1636 debug("umctl2 register 0x%x value is 0x%x after ", reg_addr, reg);
1637 debug("update with train result\n");
1638}
1639
1640/* helper function for updating train result to register */
1641static void set_cal_res_to_reg(u32 reg_addr, u16 update_value, u32 mask,
1642 u32 shift)
1643{
1644 u32 reg, value;
1645
1646 reg = readl((uintptr_t)reg_addr);
1647
1648 debug("max value divided by 2 is 0x%x\n", update_value);
1649 debug("umctl2 register 0x%x value is 0x%x before ", reg_addr, reg);
1650 debug("update with train result\n");
1651
1652 value = (reg & mask) >> shift;
1653
1654 value = ((value + update_value + 3) << shift) & mask;
1655
1656 /* update register */
1657 writel((reg & (~mask)) | value, (uintptr_t)reg_addr);
1658
1659 reg = readl((uintptr_t)reg_addr);
1660 debug("umctl2 register 0x%x value is 0x%x after ", reg_addr, reg);
1661 debug("update with train result\n");
1662}
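/*
 * Illustrative worked example (values are hypothetical): both helpers above
 * add the trained value plus a fixed margin of 3 to the existing field, e.g.
 * with a current DRAMTMG2.wr2rd of 0xA and a trained upd_val of 0x2:
 *
 *	new_field = 0xA + 0x2 + 3 = 0xF
 *
 * set_cal_res_to_rankctrl() additionally sets the given *_GAP_MSB bit when
 * the result no longer fits in the 4-bit RANKCTL gap field (value > 0xF).
 */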
1663
1664static u16 get_max_txdqsdlytg0_ux_p0(struct ddr_handoff *handoff, u32 reg,
1665 u8 numdbyte, u16 upd_val)
1666{
1667 u32 b_addr;
1668 u16 val;
1669 u8 byte;
1670
1671 /* Getting max value from DBYTEx TxDqsDlyTg0_ux_p0 */
1672 for (byte = 0; byte < numdbyte; byte++) {
1673 b_addr = byte << 13;
1674
1675 /* TxDqsDlyTg0[9:6] is the coarse delay */
1676 val = (readw((uintptr_t)(handoff->phy_base +
1677 reg + b_addr)) &
1678 DDR_PHY_TXDQDLYTG0_COARSE_DELAY) >>
1679 DDR_PHY_TXDQDLYTG0_COARSE_DELAY_SHIFT;
1680
1681 upd_val = max(val, upd_val);
1682 }
1683
1684 return upd_val;
1685}
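/*
 * Illustrative note (derived from the PHY offsets above): b_addr = byte << 13
 * steps through per-DBYTE register blocks spaced 0x2000 apart, e.g. with
 * reg = DDR_PHY_DBYTE0_TXDQDLYTG0_U0_P0:
 *
 *	byte 0: 0x201A0 + (0 << 13) = 0x201A0 (DDR_PHY_DBYTE0_TXDQDLYTG0_U0_P0)
 *	byte 1: 0x201A0 + (1 << 13) = 0x221A0 (DDR_PHY_DBYTE1_TXDQDLYTG0_U0_P0)
 *
 * and bits [9:6] of each value hold the coarse delay that is maximized here.
 */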
1686
1687static int set_cal_res_to_umctl2(struct ddr_handoff *handoff,
1688 phys_addr_t umctl2_base,
1689 enum ddr_type umctl2_type)
1690{
1691 int ret;
1692 u8 numdbyte = 0x8;
1693 u16 upd_val, val;
1694 u32 dramtmg2_reg_addr, rankctl_reg_addr, reg_addr;
1695
1696 /* Enable quasi-dynamic programming of the controller registers */
1697 clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1698
1699 ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type);
1700 if (ret)
1701 return ret;
1702
1703 /* Enable access to the PHY configuration registers */
1704 clrbits_le16(handoff->phy_base + DDR_PHY_APBONLY0_OFFSET,
1705 DDR_PHY_MICROCONTMUXSEL);
1706
1707 if (umctl2_type == DDRTYPE_DDR4) {
1708 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1709 DMEM_MB_CDD_WW_1_0_OFFSET)));
1710
1711 upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1712 DMEM_MB_CDD_WW_0_1_OFFSET)));
1713 } else if (umctl2_type == DDRTYPE_LPDDR4_0) {
1714 val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1715 DMEM_MB_CDD_CHA_WW_1_0_OFFSET)));
1716
1717 upd_val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1718 DMEM_MB_CDD_CHA_WW_0_1_OFFSET)));
1719 } else if (umctl2_type == DDRTYPE_LPDDR4_1) {
1720 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1721 DMEM_MB_CDD_CHB_WW_1_0_OFFSET)));
1722
1723 upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1724 DMEM_MB_CDD_CHB_WW_0_1_OFFSET)));
1725 }
1726
1727 upd_val = max(val, upd_val);
1728 debug("max value is 0x%x\n", upd_val);
1729
1730 /* Dividing by two is required when running in freq ratio 1:2 */
1731 if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO))
1732 upd_val = DIV_ROUND_CLOSEST(upd_val, 2);
1733
1734 debug("Update train value to umctl2 RANKCTL.diff_rank_wr_gap\n");
1735 rankctl_reg_addr = umctl2_base + DDR4_RANKCTL_OFFSET;
1736 /* Update train value to umctl2 RANKCTL.diff_rank_wr_gap */
1737 set_cal_res_to_rankctrl(rankctl_reg_addr, upd_val,
1738 DDR4_RANKCTL_DIFF_RANK_WR_GAP,
1739 DDR4_RANKCTL_DIFF_RANK_WR_GAP_MSB,
1740 DDR4_RANKCTL_DIFF_RANK_WR_GAP_SHIFT);
1741
1742 debug("Update train value to umctl2 DRAMTMG2.W2RD\n");
1743 dramtmg2_reg_addr = umctl2_base + DDR4_DRAMTMG2_OFFSET;
1744 /* Update train value to umctl2 dramtmg2.wr2rd */
1745 set_cal_res_to_reg(dramtmg2_reg_addr, upd_val, DDR4_DRAMTMG2_WR2RD, 0);
1746
1747 if (umctl2_type == DDRTYPE_DDR4) {
1748 debug("Update train value to umctl2 DRAMTMG9.W2RD_S\n");
1749 reg_addr = umctl2_base + DDR4_DRAMTMG9_OFFSET;
1750 /* Update train value to umctl2 dramtmg9.wr2rd_s */
1751 set_cal_res_to_reg(reg_addr, upd_val, DDR4_DRAMTMG9_W2RD_S, 0);
1752 }
1753
1754 if (umctl2_type == DDRTYPE_DDR4) {
1755 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1756 DMEM_MB_CDD_RR_1_0_OFFSET)));
1757
1758 upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1759 DMEM_MB_CDD_RR_0_1_OFFSET)));
1760 } else if (umctl2_type == DDRTYPE_LPDDR4_0) {
1761 val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1762 DMEM_MB_CDD_CHA_RR_1_0_OFFSET)));
1763
1764 upd_val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1765 DMEM_MB_CDD_CHA_RR_0_1_OFFSET)));
1766 } else if (umctl2_type == DDRTYPE_LPDDR4_1) {
1767 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1768 DMEM_MB_CDD_CHB_RR_1_0_OFFSET)));
1769
1770 upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1771 DMEM_MB_CDD_CHB_RR_0_1_OFFSET)));
1772 }
1773
1774 upd_val = max(val, upd_val);
1775 debug("max value is 0x%x\n", upd_val);
1776
1777	/* Dividing by two is required when running in freq ratio 1:2 */
1778 if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO))
1779 upd_val = DIV_ROUND_CLOSEST(upd_val, 2);
1780
1781 debug("Update train value to umctl2 RANKCTL.diff_rank_rd_gap\n");
1782 /* Update train value to umctl2 RANKCTL.diff_rank_rd_gap */
1783 set_cal_res_to_rankctrl(rankctl_reg_addr, upd_val,
1784 DDR4_RANKCTL_DIFF_RANK_RD_GAP,
1785 DDR4_RANKCTL_DIFF_RANK_RD_GAP_MSB,
1786 DDR4_RANKCTL_DIFF_RANK_RD_GAP_SHIFT);
1787
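	/*
	 * For read-to-write the firmware reports a value for each of the
	 * four rank combinations; the maximum across all of them is applied
	 * to DRAMTMG2.rd2wr below.
	 */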
1788 if (umctl2_type == DDRTYPE_DDR4) {
1789 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1790 DMEM_MB_CDD_RW_1_1_OFFSET)));
1791
1792 upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1793 DMEM_MB_CDD_RW_1_0_OFFSET)));
1794
1795 upd_val = max(val, upd_val);
1796
1797 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1798 DMEM_MB_CDD_RW_0_1_OFFSET)));
1799
1800 upd_val = max(val, upd_val);
1801
1802 val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1803 DMEM_MB_CDD_RW_0_0_OFFSET)));
1804
1805 upd_val = max(val, upd_val);
1806 } else if (umctl2_type == DDRTYPE_LPDDR4_0) {
1807 val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1808 DMEM_MB_CDD_CHA_RW_1_1_OFFSET)));
1809
1810 upd_val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1811 DMEM_MB_CDD_CHA_RW_1_0_OFFSET)));
1812
1813 upd_val = max(val, upd_val);
1814
1815 val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1816 DMEM_MB_CDD_CHA_RW_0_1_OFFSET)));
1817
1818 upd_val = max(val, upd_val);
1819
1820 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1821 DMEM_MB_CDD_CHA_RW_0_0_OFFSET)));
1822
1823 upd_val = max(val, upd_val);
1824 } else if (umctl2_type == DDRTYPE_LPDDR4_1) {
1825 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1826 DMEM_MB_CDD_CHB_RW_1_1_OFFSET)));
1827
1828 upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1829 DMEM_MB_CDD_CHB_RW_1_0_OFFSET)));
1830
1831 upd_val = max(val, upd_val);
1832
1833 val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base +
1834 DMEM_MB_CDD_CHB_RW_0_1_OFFSET)));
1835
1836 upd_val = max(val, upd_val);
1837
1838 val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base +
1839 DMEM_MB_CDD_CHB_RW_0_0_OFFSET)));
1840
1841 upd_val = max(val, upd_val);
1842 }
1843
1844 debug("max value is 0x%x\n", upd_val);
1845
1846	/* Dividing by two is required when running in freq ratio 1:2 */
1847 if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO))
1848 upd_val = DIV_ROUND_CLOSEST(upd_val, 2);
1849
1850 debug("Update train value to umctl2 dramtmg2.rd2wr\n");
1851 /* Update train value to umctl2 dramtmg2.rd2wr */
1852 set_cal_res_to_reg(dramtmg2_reg_addr, upd_val, DDR4_DRAMTMG2_RD2WR,
1853 DDR4_DRAMTMG2_RD2WR_SHIFT);
1854
1855	/* Check whether ECC is enabled; LPDDR4 uses inline ECC */
1856 val = readl(umctl2_base + DDR4_ECCCFG0_OFFSET) & DDR4_ECC_MODE;
1857 if (val && umctl2_type == DDRTYPE_DDR4)
1858 numdbyte = 0x9;
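	/*
	 * With ECC enabled on DDR4 an extra (ninth) DBYTE lane presumably
	 * carries the ECC bits, so it is included in the TxDqsDlyTg0 search
	 * below; LPDDR4 uses inline ECC, so the lane count stays at eight.
	 */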
1859
1860 upd_val = 0;
1861
1862 /* Getting max value from DBYTEx TxDqsDlyTg0_u0_p0 */
1863 upd_val = get_max_txdqsdlytg0_ux_p0(handoff,
1864 DDR_PHY_DBYTE0_TXDQDLYTG0_U0_P0,
1865 numdbyte, upd_val);
1866
1867 /* Getting max value from DBYTEx TxDqsDlyTg0_u1_p0 */
1868 upd_val = get_max_txdqsdlytg0_ux_p0(handoff,
1869 DDR_PHY_DBYTE0_TXDQDLYTG0_U1_P0,
1870 numdbyte, upd_val);
1871
1872 debug("TxDqsDlyTg0 max value is 0x%x\n", upd_val);
1873
1874	/* Dividing by two is required when running in freq ratio 1:2 */
1875 if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO))
1876 upd_val = DIV_ROUND_CLOSEST(upd_val, 2);
1877
1878 reg_addr = umctl2_base + DDR4_DFITMG1_OFFSET;
1879 /* Update train value to umctl2 dfitmg1.dfi_wrdata_delay */
1880 set_cal_res_to_reg(reg_addr, upd_val, DDR4_DFITMG1_DFI_T_WRDATA_DELAY,
1881 DDR4_DFITMG1_DFI_T_WRDATA_SHIFT);
1882
1883 /* Complete quasi-dynamic register programming */
1884 setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE);
1885
1886 /* Polling programming done */
1887 ret = wait_for_bit_le32((const void *)(umctl2_base +
1888 DDR4_SWSTAT_OFFSET), DDR4_SWSTAT_SW_DONE_ACK,
1889 true, TIMEOUT_200MS, false);
1890 if (ret) {
1891 debug("%s: Timeout while waiting for", __func__);
1892 debug(" programming done\n");
1893 }
1894
1895 /* Isolate the APB access from internal CSRs */
1896 setbits_le16(handoff->phy_base + DDR_PHY_APBONLY0_OFFSET,
1897 DDR_PHY_MICROCONTMUXSEL);
1898
1899 return ret;
1900}
1901
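/*
 * Write the calibration results into the DDR controller(s): always into the
 * first controller (DDR4 or LPDDR4 channel A) and, when a second LPDDR4
 * channel is present, into the second controller as well.
 */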
1902static int update_training_result(struct ddr_handoff *ddr_handoff_info)
1903{
1904 int ret = 0;
1905
1906 /* Updating training result to first DDR controller */
1907 if (ddr_handoff_info->cntlr_t == DDRTYPE_DDR4 ||
1908 ddr_handoff_info->cntlr_t == DDRTYPE_LPDDR4_0) {
1909 ret = set_cal_res_to_umctl2(ddr_handoff_info,
1910 ddr_handoff_info->cntlr_base,
1911 ddr_handoff_info->cntlr_t);
1912 if (ret) {
1913 debug("%s: Failed to update train result to ",
1914 __func__);
1915 debug("first DDR controller\n");
1916 return ret;
1917 }
1918 }
1919
1920 /* Updating training result to 2nd DDR controller */
1921 if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) {
1922 ret = set_cal_res_to_umctl2(ddr_handoff_info,
1923 ddr_handoff_info->cntlr2_base,
1924 ddr_handoff_info->cntlr2_t);
1925 if (ret) {
1926 debug("%s: Failed to update train result to ",
1927 __func__);
1928 debug("2nd DDR controller\n");
1929 }
1930 }
1931
1932 return ret;
1933}
1934
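/*
 * Full calibration sequence: load and run the 1D training firmware, wait for
 * it to complete, push the 1D results into the controller registers, then
 * load and run the 2D training firmware and wait for it to complete.
 */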
1935static int start_ddr_calibration(struct ddr_handoff *ddr_handoff_info)
1936{
1937 int ret;
1938
1939	/* Configure the 1D training firmware */
1940 ret = configure_training_firmware(ddr_handoff_info,
1941 (const void *)SOC64_HANDOFF_DDR_TRAIN_IMEM_1D_SECTION,
1942 (const void *)SOC64_HANDOFF_DDR_TRAIN_DMEM_1D_SECTION);
1943 if (ret) {
1944 debug("%s: Failed to configure 1D training firmware\n",
1945 __func__);
1946 return ret;
1947 }
1948
1949 calibrating_sdram(ddr_handoff_info);
1950
1951 ret = poll_for_training_complete(ddr_handoff_info);
1952 if (ret) {
1953 debug("%s: Failed to get FW training completed\n",
1954 __func__);
1955 return ret;
1956 }
1957
1958 /* Updating training result to DDR controller */
1959 ret = update_training_result(ddr_handoff_info);
1960 if (ret)
1961 return ret;
1962
1963	/* Configure the 2D training firmware */
1964 ret = configure_training_firmware(ddr_handoff_info,
1965 (const void *)SOC64_HANDOFF_DDR_TRAIN_IMEM_2D_SECTION,
1966 (const void *)SOC64_HANDOFF_DDR_TRAIN_DMEM_2D_SECTION);
1967 if (ret) {
1968		debug("%s: Failed to configure 2D training firmware\n",
1969		      __func__);
1970 return ret;
1971 }
1972
1973 calibrating_sdram(ddr_handoff_info);
1974
1975 ret = poll_for_training_complete(ddr_handoff_info);
1976 if (ret)
1977 debug("%s: Failed to get FW training completed\n",
1978 __func__);
1979
1980 return ret;
1981}
1982
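/*
 * Program the DDR controller(s) from the handoff data; user_backup receives
 * the settings that sdram_mmr_init_full() later restores to PWRCTL and INIT0
 * once calibration has completed.
 */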
1983static int init_controller(struct ddr_handoff *ddr_handoff_info,
1984 u32 *user_backup, u32 *user_backup_2nd)
1985{
1986 int ret = 0;
1987
1988 if (ddr_handoff_info->cntlr_t == DDRTYPE_DDR4 ||
1989 ddr_handoff_info->cntlr_t == DDRTYPE_LPDDR4_0) {
1990 /* Initialize 1st DDR controller */
1991 ret = init_umctl2(ddr_handoff_info->cntlr_handoff_base,
1992 ddr_handoff_info->cntlr_base,
1993 ddr_handoff_info->cntlr_t,
1994 ddr_handoff_info->cntlr_handoff_length,
1995 user_backup);
1996 if (ret) {
1997			debug("%s: Failed to initialize first controller\n",
1998 __func__);
1999 return ret;
2000 }
2001 }
2002
2003 if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) {
2004 /* Initialize 2nd DDR controller */
2005 ret = init_umctl2(ddr_handoff_info->cntlr2_handoff_base,
2006 ddr_handoff_info->cntlr2_base,
2007 ddr_handoff_info->cntlr2_t,
2008 ddr_handoff_info->cntlr2_handoff_length,
2009 user_backup_2nd);
2010 if (ret)
2011			debug("%s: Failed to initialize 2nd controller\n",
2012 __func__);
2013 }
2014
2015 return ret;
2016}
2017
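/*
 * The helpers below (dfi_init, check_dfi_init, trigger_sdram_init and
 * ddr_post_config) each apply one bring-up step to the first controller and
 * repeat it on the second controller when a dual-channel LPDDR4
 * configuration is in use.
 */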
2018static int dfi_init(struct ddr_handoff *ddr_handoff_info)
2019{
2020 int ret;
2021
2022 ret = ddr_start_dfi_init(ddr_handoff_info->cntlr_base,
2023 ddr_handoff_info->cntlr_t);
2024 if (ret)
2025 return ret;
2026
2027 if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1)
2028 ret = ddr_start_dfi_init(ddr_handoff_info->cntlr2_base,
2029 ddr_handoff_info->cntlr2_t);
2030
2031 return ret;
2032}
2033
2034static int check_dfi_init(struct ddr_handoff *handoff)
2035{
2036 int ret;
2037
2038 ret = ddr_check_dfi_init_complete(handoff->cntlr_base,
2039 handoff->cntlr_t);
2040 if (ret)
2041 return ret;
2042
2043 if (handoff->cntlr2_t == DDRTYPE_LPDDR4_1)
2044 ret = ddr_check_dfi_init_complete(handoff->cntlr2_base,
2045 handoff->cntlr2_t);
2046
2047 return ret;
2048}
2049
2050static int trigger_sdram_init(struct ddr_handoff *handoff)
2051{
2052 int ret;
2053
2054 ret = ddr_trigger_sdram_init(handoff->cntlr_base,
2055 handoff->cntlr_t);
2056 if (ret)
2057 return ret;
2058
2059 if (handoff->cntlr2_t == DDRTYPE_LPDDR4_1)
2060 ret = ddr_trigger_sdram_init(handoff->cntlr2_base,
2061 handoff->cntlr2_t);
2062
2063 return ret;
2064}
2065
2066static int ddr_post_config(struct ddr_handoff *handoff)
2067{
2068 int ret;
2069
2070 ret = ddr_post_handoff_config(handoff->cntlr_base,
2071 handoff->cntlr_t);
2072 if (ret)
2073 return ret;
2074
2075 if (handoff->cntlr2_t == DDRTYPE_LPDDR4_1)
2076 ret = ddr_post_handoff_config(handoff->cntlr2_base,
2077 handoff->cntlr2_t);
2078
2079 return ret;
2080}
2081
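/*
 * The boot_scratch_cold0 register preserves three pieces of state across
 * resets: the DDR retention flag, a flag indicating that the DDR bitstream
 * SHA matches, and the last reset type. They are combined in is_ddr_init()
 * to decide whether DDR must be re-initialized.
 */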
2082static bool is_ddr_retention_enabled(u32 boot_scratch_cold0_reg)
2083{
2084 return boot_scratch_cold0_reg &
2085 ALT_SYSMGR_SCRATCH_REG_0_DDR_RETENTION_MASK;
2086}
2087
2088static bool is_ddr_bitstream_sha_matching(u32 boot_scratch_cold0_reg)
2089{
2090 return boot_scratch_cold0_reg & ALT_SYSMGR_SCRATCH_REG_0_DDR_SHA_MASK;
2091}
2092
2093static enum reset_type get_reset_type(u32 boot_scratch_cold0_reg)
2094{
2095 return (boot_scratch_cold0_reg &
2096 ALT_SYSMGR_SCRATCH_REG_0_DDR_RESET_TYPE_MASK) >>
2097 ALT_SYSMGR_SCRATCH_REG_0_DDR_RESET_TYPE_SHIFT;
2098}
2099
2100void reset_type_debug_print(u32 boot_scratch_cold0_reg)
2101{
2102 switch (get_reset_type(boot_scratch_cold0_reg)) {
2103 case POR_RESET:
2104 debug("%s: POR is triggered\n", __func__);
2105 break;
2106 case WARM_RESET:
2107 debug("%s: Warm reset is triggered\n", __func__);
2108 break;
2109 case COLD_RESET:
2110 debug("%s: Cold reset is triggered\n", __func__);
2111 break;
2112 default:
2113 debug("%s: Invalid reset type\n", __func__);
2114 }
2115}
2116
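/*
 * Decision implemented below:
 *   POR        -> initialize DDR
 *   warm reset -> skip initialization
 *   cold reset -> skip only if retention is enabled and the bitstream SHA
 *                 matches, otherwise initialize
 *   otherwise  -> initialize
 */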
2117bool is_ddr_init(void)
2118{
2119 u32 reg = readl(socfpga_get_sysmgr_addr() +
2120 SYSMGR_SOC64_BOOT_SCRATCH_COLD0);
2121
2122 reset_type_debug_print(reg);
2123
2124 if (get_reset_type(reg) == POR_RESET) {
2125 debug("%s: DDR init is required\n", __func__);
2126 return true;
2127 }
2128
2129 if (get_reset_type(reg) == WARM_RESET) {
2130 debug("%s: DDR init is skipped\n", __func__);
2131 return false;
2132 }
2133
2134 if (get_reset_type(reg) == COLD_RESET) {
2135 if (is_ddr_retention_enabled(reg) &&
2136 is_ddr_bitstream_sha_matching(reg)) {
2137 debug("%s: DDR retention bit is set\n", __func__);
2138			debug("%s: DDR bitstream SHA matches\n", __func__);
2139 debug("%s: DDR init is skipped\n", __func__);
2140 return false;
2141 }
2142 }
2143
2144 debug("%s: DDR init is required\n", __func__);
2145 return true;
2146}
2147
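/*
 * Top-level SDRAM init: parse the handoff data, select the DDR type on the
 * MPFE NoC and, when is_ddr_init() says so, bring the subsystem up (clock,
 * controller, PHY, calibration, DFI/SDRAM init, post config) before
 * restoring the user PWRCTL/INIT0 settings and enabling port traffic.
 * Finally the memory size is taken from the devicetree and the firewall is
 * configured.
 */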
2148int sdram_mmr_init_full(struct udevice *dev)
2149{
2150 u32 user_backup[2], user_backup_2nd[2];
2151 int ret;
2152 struct bd_info bd;
2153 struct ddr_handoff ddr_handoff_info;
2154 struct altera_sdram_priv *priv = dev_get_priv(dev);
2155
2156 printf("Checking SDRAM configuration in progress ...\n");
2157 ret = populate_ddr_handoff(&ddr_handoff_info);
2158 if (ret) {
2159 debug("%s: Failed to populate DDR handoff\n",
2160 __func__);
2161 return ret;
2162 }
2163
2164	/* Set the MPFE NoC mux to the correct DDR controller type */
2165 use_ddr4(ddr_handoff_info.cntlr_t);
2166
2167 if (is_ddr_init()) {
2168 printf("SDRAM init in progress ...\n");
2169
2170 /*
2171		 * Poll for reset complete; it must be high to ensure the DDR
2172		 * subsystem is fully in reset before initializing the DDR
2173		 * clock and the DDR controller
2174 */
2175 ret = wait_for_bit_le32((const void *)((uintptr_t)(readl
2176 (ddr_handoff_info.mem_reset_base) +
2177 MEM_RST_MGR_STATUS)),
2178 MEM_RST_MGR_STATUS_RESET_COMPLETE,
2179 true, TIMEOUT_200MS, false);
2180 if (ret) {
2181 debug("%s: Timeout while waiting for", __func__);
2182			debug(" reset complete\n");
2183 return ret;
2184 }
2185
2186 ret = enable_ddr_clock(dev);
2187 if (ret)
2188 return ret;
2189
2190 ret = init_controller(&ddr_handoff_info, user_backup,
2191 user_backup_2nd);
2192 if (ret) {
2193			debug("%s: Failed to initialize DDR controller\n",
2194 __func__);
2195 return ret;
2196 }
2197
2198 /* Release the controller from reset */
2199 setbits_le32((uintptr_t)
2200 (readl(ddr_handoff_info.mem_reset_base) +
2201 MEM_RST_MGR_STATUS), MEM_RST_MGR_STATUS_AXI_RST |
2202 MEM_RST_MGR_STATUS_CONTROLLER_RST |
2203 MEM_RST_MGR_STATUS_RESET_COMPLETE);
2204
2205 printf("DDR controller configuration is completed\n");
2206
2207 /* Initialize DDR PHY */
2208 ret = init_phy(&ddr_handoff_info);
2209 if (ret) {
2210			debug("%s: Failed to initialize DDR PHY\n", __func__);
2211 return ret;
2212 }
2213
2214 enable_phy_clk_for_csr_access(&ddr_handoff_info, true);
2215
2216 ret = start_ddr_calibration(&ddr_handoff_info);
2217 if (ret) {
2218 debug("%s: Failed to calibrate DDR\n", __func__);
2219 return ret;
2220 }
2221
2222 enable_phy_clk_for_csr_access(&ddr_handoff_info, false);
2223
2224		/* Hold the ARC processor in reset when not in use, for security purposes */
2225 setbits_le16(ddr_handoff_info.phy_base +
2226 DDR_PHY_MICRORESET_OFFSET,
2227 DDR_PHY_MICRORESET_RESET);
2228
2229		/* DDR frequency is set to support DDR4-3200 */
2230 phy_init_engine(&ddr_handoff_info);
2231
2232 ret = dfi_init(&ddr_handoff_info);
2233 if (ret)
2234 return ret;
2235
2236 ret = check_dfi_init(&ddr_handoff_info);
2237 if (ret)
2238 return ret;
2239
2240 ret = trigger_sdram_init(&ddr_handoff_info);
2241 if (ret)
2242 return ret;
2243
2244 ret = ddr_post_config(&ddr_handoff_info);
2245 if (ret)
2246 return ret;
2247
2248 /* Restore user settings */
2249 writel(user_backup[0], ddr_handoff_info.cntlr_base +
2250 DDR4_PWRCTL_OFFSET);
2251
2252 if (ddr_handoff_info.cntlr2_t == DDRTYPE_LPDDR4_0)
2253 setbits_le32(ddr_handoff_info.cntlr_base +
2254 DDR4_INIT0_OFFSET, user_backup[1]);
2255
2256 if (ddr_handoff_info.cntlr2_t == DDRTYPE_LPDDR4_1) {
2257 /* Restore user settings */
2258 writel(user_backup_2nd[0],
2259 ddr_handoff_info.cntlr2_base +
2260 DDR4_PWRCTL_OFFSET);
2261
2262 setbits_le32(ddr_handoff_info.cntlr2_base +
2263 DDR4_INIT0_OFFSET, user_backup_2nd[1]);
2264 }
2265
2266 /* Enable input traffic per port */
2267 setbits_le32(ddr_handoff_info.cntlr_base + DDR4_PCTRL0_OFFSET,
2268 DDR4_PCTRL0_PORT_EN);
2269
2270 if (ddr_handoff_info.cntlr2_t == DDRTYPE_LPDDR4_1) {
2271 /* Enable input traffic per port */
2272 setbits_le32(ddr_handoff_info.cntlr2_base +
2273 DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN);
2274 }
2275
2276 printf("DDR init success\n");
2277 }
2278
2279 /* Get bank configuration from devicetree */
2280 ret = fdtdec_decode_ram_size(gd->fdt_blob, NULL, 0, NULL,
2281 (phys_size_t *)&gd->ram_size, &bd);
2282 if (ret) {
2283 debug("%s: Failed to decode memory node\n", __func__);
2284 return -1;
2285 }
2286
2287 printf("DDR: %lld MiB\n", gd->ram_size >> 20);
2288
2289 priv->info.base = bd.bi_dram[0].start;
2290 priv->info.size = gd->ram_size;
2291
2292 sdram_size_check(&bd);
2293
2294 sdram_set_firewall(&bd);
2295
2296 return 0;
2297}