// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2019 Intel Corporation <www.intel.com>
 *
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <div64.h>
#include <fdtdec.h>
#include <ram.h>
#include <reset.h>
#include "sdram_soc64.h"
#include <wait_bit.h>
#include <asm/arch/firewall.h>
#include <asm/arch/system_manager.h>
#include <asm/arch/reset_manager.h>
#include <asm/io.h>
#include <linux/sizes.h>

#define PGTABLE_OFF	0x4000

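/*
 * Register accessors for the three windows taken from the device tree:
 * the DDR scheduler (ddr_sch), the HMC MMR space (iomhc) and the HMC
 * ECC registers (hmc).
 */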
u32 hmc_readl(struct altera_sdram_platdata *plat, u32 reg)
{
	return readl(plat->iomhc + reg);
}

u32 hmc_ecc_readl(struct altera_sdram_platdata *plat, u32 reg)
{
	return readl(plat->hmc + reg);
}

u32 hmc_ecc_writel(struct altera_sdram_platdata *plat,
		   u32 data, u32 reg)
{
	return writel(data, plat->hmc + reg);
}

u32 ddr_sch_writel(struct altera_sdram_platdata *plat, u32 data,
		   u32 reg)
{
	return writel(data, plat->ddr_sch + reg);
}

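/**
 * emif_clear() - Clear the EMIF reset handshake request
 *
 * Zero the core-to-sequencer handshake control register and wait for the
 * sequencer-to-core status bits to clear.
 *
 * Return: 0 on success, negative value on timeout.
 */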
int emif_clear(struct altera_sdram_platdata *plat)
{
	hmc_ecc_writel(plat, 0, RSTHANDSHAKECTRL);

	return wait_for_bit_le32((const void *)(plat->hmc +
				 RSTHANDSHAKESTAT),
				 DDR_HMC_RSTHANDSHAKE_MASK,
				 false, 1000, false);
}

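/**
 * emif_reset() - Trigger an EMIF reset through the HMC handshake
 *
 * Clear any pending handshake, raise the core-to-sequencer interrupt
 * request and wait for the sequencer response bit to clear before
 * clearing the request again.
 *
 * Return: 0 on success, negative value on failure.
 */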
int emif_reset(struct altera_sdram_platdata *plat)
{
	u32 c2s, s2c, ret;

	c2s = hmc_ecc_readl(plat, RSTHANDSHAKECTRL) & DDR_HMC_RSTHANDSHAKE_MASK;
	s2c = hmc_ecc_readl(plat, RSTHANDSHAKESTAT) & DDR_HMC_RSTHANDSHAKE_MASK;

	debug("DDR: c2s=%08x s2c=%08x nr0=%08x nr1=%08x nr2=%08x dst=%08x\n",
	      c2s, s2c, hmc_readl(plat, NIOSRESERVED0),
	      hmc_readl(plat, NIOSRESERVED1), hmc_readl(plat, NIOSRESERVED2),
	      hmc_readl(plat, DRAMSTS));

	if (s2c && emif_clear(plat)) {
		printf("DDR: emif_clear() failed\n");
		return -1;
	}

	debug("DDR: Triggering emif reset\n");
	hmc_ecc_writel(plat, DDR_HMC_CORE2SEQ_INT_REQ, RSTHANDSHAKECTRL);

	/* if seq2core[3] = 0, we are good */
	ret = wait_for_bit_le32((const void *)(plat->hmc +
				RSTHANDSHAKESTAT),
				DDR_HMC_SEQ2CORE_INT_RESP_MASK,
				false, 1000, false);
	if (ret) {
		printf("DDR: failed to get ack from EMIF\n");
		return ret;
	}

	ret = emif_clear(plat);
	if (ret) {
		printf("DDR: emif_clear() failed\n");
		return ret;
	}

	debug("DDR: %s triggered successfully\n", __func__);
	return 0;
}

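/**
 * poll_hmc_clock_status() - Wait for the HMC clock status bit
 *
 * Poll the system manager until the HMC clock status bit is set.
 *
 * Return: 0 on success, negative value on timeout.
 */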
int poll_hmc_clock_status(void)
{
	return wait_for_bit_le32((const void *)(socfpga_get_sysmgr_addr() +
				 SYSMGR_SOC64_HMC_CLK),
				 SYSMGR_HMC_CLK_STATUS_MSK, true, 1000, false);
}

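/**
 * sdram_clear_mem() - Zero a block of SDRAM with the DC ZVA instruction
 * @addr: start address, must be cacheline size aligned
 * @size: length in bytes, must be a multiple of the cacheline size
 *
 * Hangs if either argument is misaligned. The DC ZVA block size is
 * assumed to match CONFIG_SYS_CACHELINE_SIZE.
 */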
void sdram_clear_mem(phys_addr_t addr, phys_size_t size)
{
	phys_size_t i;

	if (addr % CONFIG_SYS_CACHELINE_SIZE) {
		printf("DDR: address 0x%llx is not cacheline size aligned.\n",
		       addr);
		hang();
	}

	if (size % CONFIG_SYS_CACHELINE_SIZE) {
		printf("DDR: size 0x%llx is not a multiple of the cacheline size\n",
		       size);
		hang();
	}

	/* Use the DC ZVA instruction to clear memory, one cache line at a time */
	for (i = 0; i < size; i = i + CONFIG_SYS_CACHELINE_SIZE) {
		asm volatile("dc zva, %0"
			     :
			     : "r"(addr)
			     : "memory");
		addr += CONFIG_SYS_CACHELINE_SIZE;
	}
}

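/**
 * sdram_init_ecc_bits() - Scrub DRAM to initialize the ECC bits
 * @bd: board info describing the DRAM banks
 *
 * Enable the caches, carve out a small page table block at the start of
 * the first bank, then zero every bank in 1 GiB chunks (with watchdog
 * kicks in between) before disabling the caches again.
 */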
void sdram_init_ecc_bits(bd_t *bd)
{
	phys_size_t size, size_init;
	phys_addr_t start_addr;
	int bank = 0;
	unsigned int start = get_timer(0);

	icache_enable();

	start_addr = bd->bi_dram[0].start;
	size = bd->bi_dram[0].size;

	/* Initialize a small block for the page table */
	memset((void *)start_addr, 0, PGTABLE_SIZE + PGTABLE_OFF);
	gd->arch.tlb_addr = start_addr + PGTABLE_OFF;
	gd->arch.tlb_size = PGTABLE_SIZE;
	start_addr += PGTABLE_SIZE + PGTABLE_OFF;
	size -= (PGTABLE_OFF + PGTABLE_SIZE);
	dcache_enable();

	while (1) {
		while (size) {
			size_init = min((phys_addr_t)SZ_1G, (phys_addr_t)size);
			sdram_clear_mem(start_addr, size_init);
			size -= size_init;
			start_addr += size_init;
			WATCHDOG_RESET();
		}

		bank++;
		if (bank >= CONFIG_NR_DRAM_BANKS)
			break;

		start_addr = bd->bi_dram[bank].start;
		size = bd->bi_dram[bank].size;
	}

	dcache_disable();
	icache_disable();

	printf("SDRAM-ECC: Initialized successfully in %d ms\n",
	       (unsigned int)get_timer(start));
}

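/**
 * sdram_size_check() - Sanity check the configured SDRAM size
 * @bd: board info describing the DRAM banks
 *
 * Probe each bank with get_ram_size() in 1 GiB steps and hang if the
 * total does not match gd->ram_size.
 */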
void sdram_size_check(bd_t *bd)
{
	phys_size_t total_ram_check = 0;
	phys_size_t ram_check = 0;
	phys_addr_t start = 0;
	int bank;

	/* Sanity check to ensure the correct SDRAM size was specified */
	debug("DDR: Running SDRAM size sanity check\n");

	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		start = bd->bi_dram[bank].start;
		while (ram_check < bd->bi_dram[bank].size) {
			ram_check += get_ram_size((void *)(start + ram_check),
						  (phys_size_t)SZ_1G);
		}
		total_ram_check += ram_check;
		ram_check = 0;
	}

	/*
	 * If the ram_size is 2GB smaller, we can assume the IO space is
	 * not mapped in. gd->ram_size is the actual size of the DRAM,
	 * not the accessible size.
	 */
	if (total_ram_check != gd->ram_size) {
		puts("DDR: SDRAM size check failed!\n");
		hang();
	}

	debug("DDR: SDRAM size check passed!\n");
}

/**
 * sdram_calculate_size() - Calculate SDRAM size
 *
 * Calculate SDRAM device size based on SDRAM controller parameters.
 * Size is specified in bytes.
 */
phys_size_t sdram_calculate_size(struct altera_sdram_platdata *plat)
{
	u32 dramaddrw = hmc_readl(plat, DRAMADDRW);

	phys_size_t size = (phys_size_t)1 <<
			   (DRAMADDRW_CFG_CS_ADDR_WIDTH(dramaddrw) +
			    DRAMADDRW_CFG_BANK_GRP_ADDR_WIDTH(dramaddrw) +
			    DRAMADDRW_CFG_BANK_ADDR_WIDTH(dramaddrw) +
			    DRAMADDRW_CFG_ROW_ADDR_WIDTH(dramaddrw) +
			    DRAMADDRW_CFG_COL_ADDR_WIDTH(dramaddrw));

	size *= (2 << (hmc_ecc_readl(plat, DDRIOCTRL) &
			DDR_HMC_DDRIOCTRL_IOSIZE_MSK));

	return size;
}

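/*
 * Fetch the DDR scheduler, HMC MMR and HMC ECC register windows from
 * the device tree 'reg' entries.
 */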
static int altera_sdram_ofdata_to_platdata(struct udevice *dev)
{
	struct altera_sdram_platdata *plat = dev->platdata;
	fdt_addr_t addr;

	addr = dev_read_addr_index(dev, 0);
	if (addr == FDT_ADDR_T_NONE)
		return -EINVAL;
	plat->ddr_sch = (void __iomem *)addr;

	addr = dev_read_addr_index(dev, 1);
	if (addr == FDT_ADDR_T_NONE)
		return -EINVAL;
	plat->iomhc = (void __iomem *)addr;

	addr = dev_read_addr_index(dev, 2);
	if (addr == FDT_ADDR_T_NONE)
		return -EINVAL;
	plat->hmc = (void __iomem *)addr;

	return 0;
}

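/*
 * Bring the SDRAM controller out of reset and run the full
 * initialization; release the resets again if it fails.
 */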
static int altera_sdram_probe(struct udevice *dev)
{
	int ret;
	struct altera_sdram_priv *priv = dev_get_priv(dev);

	ret = reset_get_bulk(dev, &priv->resets);
	if (ret) {
		dev_err(dev, "Can't get reset: %d\n", ret);
		return -ENODEV;
	}
	reset_deassert_bulk(&priv->resets);

	if (sdram_mmr_init_full(dev) != 0) {
		puts("SDRAM init failed.\n");
		goto failed;
	}

	return 0;

failed:
	reset_release_bulk(&priv->resets);
	return -ENODEV;
}

static int altera_sdram_get_info(struct udevice *dev,
				 struct ram_info *info)
{
	struct altera_sdram_priv *priv = dev_get_priv(dev);

	info->base = priv->info.base;
	info->size = priv->info.size;

	return 0;
}

static struct ram_ops altera_sdram_ops = {
	.get_info = altera_sdram_get_info,
};

static const struct udevice_id altera_sdram_ids[] = {
	{ .compatible = "altr,sdr-ctl-s10" },
	{ .compatible = "intel,sdr-ctl-agilex" },
	{ /* sentinel */ }
};

U_BOOT_DRIVER(altera_sdram) = {
	.name = "altr_sdr_ctl",
	.id = UCLASS_RAM,
	.of_match = altera_sdram_ids,
	.ops = &altera_sdram_ops,
	.ofdata_to_platdata = altera_sdram_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct altera_sdram_platdata),
	.probe = altera_sdram_probe,
	.priv_auto_alloc_size = sizeof(struct altera_sdram_priv),
};