// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Intel Corporation <www.intel.com>
 *
 */

#include <dm.h>
#include <errno.h>
#include <div64.h>
#include <fdtdec.h>
#include <hang.h>
#include <log.h>
#include <ram.h>
#include <reset.h>
#include <asm/global_data.h>
#include "sdram_soc64.h"
#include <wait_bit.h>
#include <asm/arch/firewall.h>
#include <asm/arch/reset_manager.h>
#include <asm/arch/system_manager.h>
#include <asm/io.h>
#include <linux/sizes.h>

DECLARE_GLOBAL_DATA_PTR;

int sdram_mmr_init_full(struct udevice *dev)
{
	struct altera_sdram_plat *plat = dev_get_plat(dev);
	struct altera_sdram_priv *priv = dev_get_priv(dev);
	u32 i;
	int ret;
	phys_size_t hw_size;
	struct bd_info bd = {0};

	/* Ensure HMC clock is running */
	if (poll_hmc_clock_status()) {
		debug("DDR: Error as HMC clock was not running\n");
		return -EPERM;
	}

	/* Attempt calibration up to three times */
	for (i = 0; i < 3; i++) {
		ret = wait_for_bit_le32((const void *)(plat->hmc +
					DDRCALSTAT),
					DDR_HMC_DDRCALSTAT_CAL_MSK, true, 1000,
					false);
		if (!ret)
			break;

		emif_reset(plat);
	}

	if (ret) {
		puts("DDR: Error as SDRAM calibration failed\n");
		return -EPERM;
	}
	debug("DDR: Calibration success\n");

	/*
	 * Configure the DDR IO size
	 * niosreserve0: Used to indicate DDR width &
	 *	bit[7:0] = Number of data bits (bit[6:5] 0x01=32bit, 0x10=64bit)
	 *	bit[8] = 1 if user-mode OCT is present
	 *	bit[9] = 1 if warm reset compiled into EMIF Cal Code
	 *	bit[10] = 1 if warm reset is on during generation in EMIF Cal
	 * niosreserve1: IP ADCDS version encoded as 16 bit value
	 *	bit[2:0] = Variant (0=not special,1=FAE beta, 2=Customer beta,
	 *		   3=EAP, 4-6 are reserved)
	 *	bit[5:3] = Service Pack # (e.g. 1)
	 *	bit[9:6] = Minor Release #
	 *	bit[14:10] = Major Release #
	 */
	/* Configure DDR IO size x16, x32 and x64 mode */
	u32 update_value;

	update_value = hmc_readl(plat, NIOSRESERVED0);
	update_value = (update_value & 0xFF) >> 5;

	/* Configure DDR data rate 0-Half-rate 1-Quarter-rate */
	update_value |= (hmc_readl(plat, CTRLCFG3) & 0x4);
	hmc_ecc_writel(plat, update_value, DDRIOCTRL);

	/* Copy values MMR IOHMC dramaddrw to HMC adp DRAMADDRWIDTH */
	hmc_ecc_writel(plat, hmc_readl(plat, DRAMADDRW), DRAMADDRWIDTH);

	/* Assign the SDRAM size */
	phys_size_t size = sdram_calculate_size(plat);

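	/* If no valid size could be calculated, fall back to the configured default */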
	if (size <= 0)
		hw_size = PHYS_SDRAM_1_SIZE;
	else
		hw_size = size;

	/* Get bank configuration from devicetree */
	ret = fdtdec_decode_ram_size(gd->fdt_blob, NULL, 0, NULL,
				     (phys_size_t *)&gd->ram_size, &bd);
	if (ret) {
		puts("DDR: Failed to decode memory node\n");
		return -ENXIO;
	}

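	/*
	 * gd->ram_size and bd.bi_dram[] now describe the devicetree memory
	 * layout; sanity-check it against the hardware size below.
	 */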
	if (gd->ram_size != hw_size) {
		printf("DDR: Warning: DRAM size from device tree (%lld MiB)\n",
		       gd->ram_size >> 20);
		printf(" mismatch with hardware (%lld MiB).\n",
		       hw_size >> 20);
	}

	if (gd->ram_size > hw_size) {
		printf("DDR: Error: DRAM size from device tree is greater\n");
		printf(" than hardware size.\n");
		hang();
	}

	printf("DDR: %lld MiB\n", gd->ram_size >> 20);

	/* This enables nonsecure access to DDR */
	/* mpuregion0addr_limit */
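	/* Limit registers take the region's last byte address, hence ram_size - 1 */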
	FW_MPU_DDR_SCR_WRITEL(gd->ram_size - 1,
			      FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMIT);
	FW_MPU_DDR_SCR_WRITEL(0x1F, FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMITEXT);

	/* nonmpuregion0addr_limit */
	FW_MPU_DDR_SCR_WRITEL(gd->ram_size - 1,
			      FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMIT);

	/* Enable mpuregion0enable and nonmpuregion0enable */
	FW_MPU_DDR_SCR_WRITEL(MPUREGION0_ENABLE | NONMPUREGION0_ENABLE,
			      FW_MPU_DDR_SCR_EN_SET);

	u32 ctrlcfg1 = hmc_readl(plat, CTRLCFG1);

	/* Enable or disable the DDR ECC */
	if (CTRLCFG1_CFG_CTRL_EN_ECC(ctrlcfg1)) {
		setbits_le32(plat->hmc + ECCCTRL1,
			     (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_ECC_EN_SET_MSK));
		clrbits_le32(plat->hmc + ECCCTRL1,
			     (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_CNT_RST_SET_MSK));
		setbits_le32(plat->hmc + ECCCTRL2,
			     (DDR_HMC_ECCCTL2_RMW_EN_SET_MSK |
			      DDR_HMC_ECCCTL2_AWB_EN_SET_MSK));
		setbits_le32(plat->hmc + ERRINTEN,
			     DDR_HMC_ERRINTEN_DERRINTEN_EN_SET_MSK);

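		/*
		 * Initialise the DDR ECC bits, but only on a cold boot;
		 * after a warm reset the memory contents are preserved.
		 */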
		if (!cpu_has_been_warmreset())
			sdram_init_ecc_bits(&bd);
	} else {
		clrbits_le32(plat->hmc + ECCCTRL1,
			     (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_ECC_EN_SET_MSK));
		clrbits_le32(plat->hmc + ECCCTRL2,
			     (DDR_HMC_ECCCTL2_RMW_EN_SET_MSK |
			      DDR_HMC_ECCCTL2_AWB_EN_SET_MSK));
	}

	/* Enable non-secure reads/writes to HMC Adapter for SDRAM ECC */
	writel(FW_HMC_ADAPTOR_MPU_MASK, FW_HMC_ADAPTOR_REG_ADDR);

	sdram_size_check(&bd);

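	/* Record the DDR base and size reported through the RAM uclass */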
	priv->info.base = bd.bi_dram[0].start;
	priv->info.size = gd->ram_size;

	debug("DDR: HMC init success\n");
	return 0;
}