// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Stefan Roese <sr@denx.de>
 * Copyright (C) 2020 Marek Behún <kabel@kernel.org>
 */

#include <cpu_func.h>
#include <dm.h>
#include <fdtdec.h>
#include <fdt_support.h>
#include <init.h>
#include <asm/global_data.h>
#include <linux/bitops.h>
#include <linux/libfdt.h>
#include <linux/sizes.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <asm/armv8/mmu.h>
#include <sort.h>

/* Armada 3700 */
#define MVEBU_GPIO_NB_REG_BASE		(MVEBU_REGISTER(0x13800))

#define MVEBU_TEST_PIN_LATCH_N		(MVEBU_GPIO_NB_REG_BASE + 0x8)
#define MVEBU_XTAL_MODE_MASK		BIT(9)
#define MVEBU_XTAL_MODE_OFFS		9
#define MVEBU_XTAL_CLOCK_25MHZ		0x0
#define MVEBU_XTAL_CLOCK_40MHZ		0x1

#define MVEBU_NB_WARM_RST_REG		(MVEBU_GPIO_NB_REG_BASE + 0x40)
#define MVEBU_NB_WARM_RST_MAGIC_NUM	0x1d1e

/* Armada 3700 CPU Address Decoder registers */
#define MVEBU_CPU_DEC_WIN_REG_BASE	(size_t)(MVEBU_REGISTER(0xcf00))
#define MVEBU_CPU_DEC_WIN_CTRL(w)	\
	(MVEBU_CPU_DEC_WIN_REG_BASE + ((w) << 4))
#define MVEBU_CPU_DEC_WIN_CTRL_EN	BIT(0)
#define MVEBU_CPU_DEC_WIN_CTRL_TGT_MASK	0xf
#define MVEBU_CPU_DEC_WIN_CTRL_TGT_OFFS	4
#define MVEBU_CPU_DEC_WIN_CTRL_TGT_DRAM	0
#define MVEBU_CPU_DEC_WIN_CTRL_TGT_PCIE	2
#define MVEBU_CPU_DEC_WIN_SIZE(w)	(MVEBU_CPU_DEC_WIN_CTRL(w) + 0x4)
#define MVEBU_CPU_DEC_WIN_BASE(w)	(MVEBU_CPU_DEC_WIN_CTRL(w) + 0x8)
#define MVEBU_CPU_DEC_WIN_REMAP(w)	(MVEBU_CPU_DEC_WIN_CTRL(w) + 0xc)
#define MVEBU_CPU_DEC_WIN_GRANULARITY	16
#define MVEBU_CPU_DEC_WINS		5
#define MVEBU_CPU_DEC_CCI_BASE		(MVEBU_CPU_DEC_WIN_REG_BASE + 0xe0)
#define MVEBU_CPU_DEC_ROM_BASE		(MVEBU_CPU_DEC_WIN_REG_BASE + 0xf4)

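/*
 * Worst case for mem_map entries (see build_mem_map below): the static
 * SRAM/MMIO entry, the CCI-400 and BootROM windows, up to
 * MVEBU_CPU_DEC_WINS decoder windows, and the list terminator.
 */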
#define MAX_MEM_MAP_REGIONS		(MVEBU_CPU_DEC_WINS + 4)

#define A3700_PTE_BLOCK_NORMAL \
	(PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_INNER_SHARE)
#define A3700_PTE_BLOCK_DEVICE \
	(PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE)

DECLARE_GLOBAL_DATA_PTR;

static struct mm_region mvebu_mem_map[MAX_MEM_MAP_REGIONS] = {
	{
		/*
		 * SRAM, MMIO regions
		 * Don't remove this, build_mem_map needs it.
		 */
		.phys = SOC_REGS_PHY_BASE,
		.virt = SOC_REGS_PHY_BASE,
		.size = 0x02000000UL,	/* 32MiB internal registers */
		.attrs = A3700_PTE_BLOCK_DEVICE
	},
};

struct mm_region *mem_map = mvebu_mem_map;

static int get_cpu_dec_win(int win, u32 *tgt, u32 *base, u32 *size)
{
	u32 reg;

	reg = readl(MVEBU_CPU_DEC_WIN_CTRL(win));
	if (!(reg & MVEBU_CPU_DEC_WIN_CTRL_EN))
		return -1;

	if (tgt) {
		reg >>= MVEBU_CPU_DEC_WIN_CTRL_TGT_OFFS;
		reg &= MVEBU_CPU_DEC_WIN_CTRL_TGT_MASK;
		*tgt = reg;
	}

	if (base) {
		reg = readl(MVEBU_CPU_DEC_WIN_BASE(win));
		*base = reg << MVEBU_CPU_DEC_WIN_GRANULARITY;
	}

	if (size) {
		/*
		 * Window size is encoded as a contiguous run of 1s starting
		 * from the LSB, followed by 0s; the register value plus one
		 * gives the window size in 64 KiB units.
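		 *
		 * For example (illustrative value, not read from hardware),
		 * a register value of 0x3fff decodes to
		 * (0x3fff + 1) << 16 = 0x40000000, i.e. a 1 GiB window.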
		 */
		reg = readl(MVEBU_CPU_DEC_WIN_SIZE(win));
		*size = ((reg + 1) << MVEBU_CPU_DEC_WIN_GRANULARITY);
	}

	return 0;
}

/*
 * Builds mem_map according to CPU Address Decoder settings, which were set by
 * the TIMH image on the Cortex-M3 secure processor, or by ARM Trusted Firmware
 */
static void build_mem_map(void)
{
	int win, region;
	u32 reg;

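	/* Region 0 is the static SRAM/MMIO entry of mvebu_mem_map above */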
	region = 1;

	/* CCI-400 */
	reg = readl(MVEBU_CPU_DEC_CCI_BASE);
	mvebu_mem_map[region].phys = reg << 20;
	mvebu_mem_map[region].virt = reg << 20;
	mvebu_mem_map[region].size = SZ_64K;
	mvebu_mem_map[region].attrs = A3700_PTE_BLOCK_DEVICE;
	++region;

	/* AP BootROM */
	reg = readl(MVEBU_CPU_DEC_ROM_BASE);
	mvebu_mem_map[region].phys = reg << 20;
	mvebu_mem_map[region].virt = reg << 20;
	mvebu_mem_map[region].size = SZ_1M;
	mvebu_mem_map[region].attrs = A3700_PTE_BLOCK_NORMAL;
	++region;

	for (win = 0; win < MVEBU_CPU_DEC_WINS; ++win) {
		u32 base, tgt, size;
		u64 attrs;

		/* skip disabled windows */
		if (get_cpu_dec_win(win, &tgt, &base, &size))
			continue;

		if (tgt == MVEBU_CPU_DEC_WIN_CTRL_TGT_DRAM)
			attrs = A3700_PTE_BLOCK_NORMAL;
		else if (tgt == MVEBU_CPU_DEC_WIN_CTRL_TGT_PCIE)
			attrs = A3700_PTE_BLOCK_DEVICE;
		else
			/* skip windows with other targets */
			continue;

		mvebu_mem_map[region].phys = base;
		mvebu_mem_map[region].virt = base;
		mvebu_mem_map[region].size = size;
		mvebu_mem_map[region].attrs = attrs;
		++region;
	}

	/* add list terminator */
	mvebu_mem_map[region].size = 0;
	mvebu_mem_map[region].attrs = 0;
}

void enable_caches(void)
{
	icache_enable();
	dcache_enable();
}

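/*
 * Set gd->ram_size to the total amount of memory found behind the enabled
 * DRAM-targeted CPU decode windows.
 */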
int a3700_dram_init(void)
{
	int win;

	build_mem_map();

	gd->ram_size = 0;
	for (win = 0; win < MVEBU_CPU_DEC_WINS; ++win) {
		u32 base, tgt, size;

		/* skip disabled windows */
		if (get_cpu_dec_win(win, &tgt, &base, &size))
			continue;

		/* skip non-DRAM windows */
		if (tgt != MVEBU_CPU_DEC_WIN_CTRL_TGT_DRAM)
			continue;

		/*
		 * The same image may be used on boards with different RAM
		 * sizes, for example 512 MiB and 1 GiB, so determine the
		 * actual amount of RAM in this window with get_ram_size.
		 */
		gd->ram_size += get_ram_size((void *)(size_t)base, size);
	}

	return 0;
}

struct a3700_dram_window {
	size_t base, size;
};

static int dram_win_cmp(const void *a, const void *b)
{
	size_t ab, bb;

	ab = ((const struct a3700_dram_window *)a)->base;
	bb = ((const struct a3700_dram_window *)b)->base;

	if (ab < bb)
		return -1;
	else if (ab > bb)
		return 1;
	else
		return 0;
}

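/*
 * Fill gd->bd->bi_dram[] from the enabled DRAM windows, merging windows that
 * are adjacent in the physical address space into a single bank.
 */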
int a3700_dram_init_banksize(void)
{
	struct a3700_dram_window dram_wins[MVEBU_CPU_DEC_WINS];
	int bank, win, ndram_wins;
	u32 last_end;
	size_t size;

	ndram_wins = 0;
	for (win = 0; win < MVEBU_CPU_DEC_WINS; ++win) {
		u32 base, tgt, size;

		/* skip disabled windows */
		if (get_cpu_dec_win(win, &tgt, &base, &size))
			continue;

		/* skip non-DRAM windows */
		if (tgt != MVEBU_CPU_DEC_WIN_CTRL_TGT_DRAM)
			continue;

		dram_wins[win].base = base;
		dram_wins[win].size = size;
		++ndram_wins;
	}

	qsort(dram_wins, ndram_wins, sizeof(dram_wins[0]), dram_win_cmp);

	bank = 0;
	last_end = -1;

	for (win = 0; win < ndram_wins; ++win) {
		/* again determine the actual RAM size, as in a3700_dram_init */
		size = get_ram_size((void *)dram_wins[win].base,
				    dram_wins[win].size);

		/*
		 * Check whether the previous window ends where the current
		 * one starts. If so, merge the two windows into one "bank".
		 * This simple check is sufficient because dram_wins has been
		 * sorted by base address above.
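		 *
		 * E.g. (illustrative values): windows at 0x00000000 and
		 * 0x20000000, 512 MiB each, are merged into a single 1 GiB
		 * bank starting at 0x00000000.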
		 */
		if (last_end == dram_wins[win].base) {
			gd->bd->bi_dram[bank - 1].size += size;
			last_end += size;
		} else {
			if (bank == CONFIG_NR_DRAM_BANKS) {
				printf("Need more CONFIG_NR_DRAM_BANKS\n");
				return -ENOBUFS;
			}

			gd->bd->bi_dram[bank].start = dram_wins[win].base;
			gd->bd->bi_dram[bank].size = size;
			last_end = dram_wins[win].base + size;
			++bank;
		}
	}

	/*
	 * If fewer banks are needed than there are entries in bi_dram, fill
	 * the remaining entries with zeros.
	 */
	for (; bank < CONFIG_NR_DRAM_BANKS; ++bank) {
		gd->bd->bi_dram[bank].start = 0;
		gd->bd->bi_dram[bank].size = 0;
	}

	return 0;
}

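/*
 * Return the base address of the first enabled PCIe-targeted decode window,
 * or -1 (as u32) if no such window is configured.
 */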
static u32 find_pcie_window_base(void)
{
	int win;

	for (win = 0; win < MVEBU_CPU_DEC_WINS; ++win) {
		u32 base, tgt;

		/* skip disabled windows */
		if (get_cpu_dec_win(win, &tgt, &base, NULL))
			continue;

		if (tgt == MVEBU_CPU_DEC_WIN_CTRL_TGT_PCIE)
			return base;
	}

	return -1;
}

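/*
 * Overwrite the idx-th u32 cell of an existing property in place; the
 * property must already exist and be large enough.
 */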
static int fdt_setprop_inplace_u32_partial(void *blob, int node,
					   const char *name,
					   u32 idx, u32 val)
{
	val = cpu_to_fdt32(val);

	return fdt_setprop_inplace_namelen_partial(blob, node, name,
						   strlen(name),
						   idx * sizeof(u32),
						   &val, sizeof(u32));
}

int a3700_fdt_fix_pcie_regions(void *blob)
{
	u32 base, lowest_cpu_addr, fix_offset;
	int pci_cells, cpu_cells, size_cells;
	const u32 *ranges;
	int node, pnode;
	int ret, i, len;

	base = find_pcie_window_base();
	if (base == -1)
		return -ENOENT;

	node = fdt_node_offset_by_compatible(blob, -1, "marvell,armada-3700-pcie");
	if (node < 0)
		return node;

	ranges = fdt_getprop(blob, node, "ranges", &len);
	if (!ranges || !len || len % sizeof(u32))
		return -EINVAL;

	/*
	 * The "ranges" property is an array of
	 * { <PCI address> <CPU address> <size in PCI address space> }
	 * where the number of PCI address cells and size cells is stored in
	 * the "#address-cells" and "#size-cells" properties of the same node
	 * containing the "ranges" property and the number of CPU address
	 * cells is stored in the parent's "#address-cells" property.
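	 *
	 * As an illustration (values assumed, not taken from a real board),
	 * with pci_cells = 3, cpu_cells = 2 and size_cells = 2 one entry
	 * could be
	 *   <0x82000000 0 0xe8000000  0 0xe8000000  0 0x01000000>
	 * describing a 16 MiB non-prefetchable 32-bit memory range at CPU
	 * address 0xe8000000, mapped 1:1 into PCI address space.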
	 *
	 * All 3 elements can span a different number of cells. Fetch them.
	 */
	pnode = fdt_parent_offset(blob, node);
	pci_cells = fdt_address_cells(blob, node);
	cpu_cells = fdt_address_cells(blob, pnode);
	size_cells = fdt_size_cells(blob, node);

	/* PCI addresses always use 3 cells */
	if (pci_cells != 3)
		return -EINVAL;

	/* CPU addresses on Armada 37xx always use 2 cells */
	if (cpu_cells != 2)
		return -EINVAL;

	for (i = 0; i < len / sizeof(u32);
	     i += pci_cells + cpu_cells + size_cells) {
		/*
		 * Parent CPU addresses on Armada 37xx are always 32-bit, so
		 * check that the high word is zero.
		 */
		if (fdt32_to_cpu(ranges[i + pci_cells]))
			return -EINVAL;

		if (i == 0 ||
		    fdt32_to_cpu(ranges[i + pci_cells + 1]) < lowest_cpu_addr)
			lowest_cpu_addr = fdt32_to_cpu(ranges[i + pci_cells + 1]);
	}

	/* Calculate the fixup offset from the lowest CPU address in "ranges" */
	fix_offset = base - lowest_cpu_addr;
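	/*
	 * E.g. (assumed values): with the PCIe window at 0xe2000000 and DT
	 * CPU addresses starting at 0xe8000000, fix_offset is 0xfa000000,
	 * which shifts every address down by 0x06000000 (u32 wrap-around).
	 */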

	/* If fixup offset is zero there is nothing to fix */
	if (!fix_offset)
		return 0;

	/*
	 * Fix each CPU address, and also the corresponding PCI address when
	 * it is not remapped (i.e. it has the same value as the CPU address)
	 */
	for (i = 0; i < len / sizeof(u32);
	     i += pci_cells + cpu_cells + size_cells) {
		u32 cpu_addr;
		u64 pci_addr;
		int idx;

		/* Fix CPU address */
		idx = i + pci_cells + cpu_cells - 1;
		cpu_addr = fdt32_to_cpu(ranges[idx]);
		ret = fdt_setprop_inplace_u32_partial(blob, node, "ranges", idx,
						      cpu_addr + fix_offset);
		if (ret)
			return ret;

		/* Fix PCI address only if it isn't remapped (is same as CPU) */
		idx = i + pci_cells - 1;
		pci_addr = ((u64)fdt32_to_cpu(ranges[idx - 1]) << 32) |
			   fdt32_to_cpu(ranges[idx]);
		if (cpu_addr != pci_addr)
			continue;

		ret = fdt_setprop_inplace_u32_partial(blob, node, "ranges", idx,
						      cpu_addr + fix_offset);
		if (ret)
			return ret;
	}

	return 0;
}

void reset_cpu(void)
{
	/*
	 * Write magic number of 0x1d1e to North Bridge Warm Reset register
	 * to trigger warm reset
	 */
	writel(MVEBU_NB_WARM_RST_MAGIC_NUM, MVEBU_NB_WARM_RST_REG);
}

/*
 * get_ref_clk
 *
 * return: reference clock in MHz (25 or 40)
 */
u32 get_ref_clk(void)
{
	u32 regval;

	regval = (readl(MVEBU_TEST_PIN_LATCH_N) & MVEBU_XTAL_MODE_MASK) >>
		MVEBU_XTAL_MODE_OFFS;

	if (regval == MVEBU_XTAL_CLOCK_25MHZ)
		return 25;
	else
		return 40;
}