// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Stefan Roese <sr@denx.de>
 * Copyright (C) 2020 Marek Behún <kabel@kernel.org>
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <fdtdec.h>
#include <fdt_support.h>
#include <init.h>
#include <asm/global_data.h>
#include <linux/bitops.h>
#include <linux/libfdt.h>
#include <linux/sizes.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <asm/armv8/mmu.h>
#include <sort.h>

/* Armada 3700 */
#define MVEBU_GPIO_NB_REG_BASE		(MVEBU_REGISTER(0x13800))

#define MVEBU_TEST_PIN_LATCH_N		(MVEBU_GPIO_NB_REG_BASE + 0x8)
#define MVEBU_XTAL_MODE_MASK		BIT(9)
#define MVEBU_XTAL_MODE_OFFS		9
#define MVEBU_XTAL_CLOCK_25MHZ		0x0
#define MVEBU_XTAL_CLOCK_40MHZ		0x1

#define MVEBU_NB_WARM_RST_REG		(MVEBU_GPIO_NB_REG_BASE + 0x40)
#define MVEBU_NB_WARM_RST_MAGIC_NUM	0x1d1e

/* Armada 3700 CPU Address Decoder registers */
#define MVEBU_CPU_DEC_WIN_REG_BASE	(size_t)(MVEBU_REGISTER(0xcf00))
#define MVEBU_CPU_DEC_WIN_CTRL(w) \
	(MVEBU_CPU_DEC_WIN_REG_BASE + ((w) << 4))
#define MVEBU_CPU_DEC_WIN_CTRL_EN	BIT(0)
#define MVEBU_CPU_DEC_WIN_CTRL_TGT_MASK	0xf
#define MVEBU_CPU_DEC_WIN_CTRL_TGT_OFFS	4
#define MVEBU_CPU_DEC_WIN_CTRL_TGT_DRAM	0
#define MVEBU_CPU_DEC_WIN_CTRL_TGT_PCIE	2
#define MVEBU_CPU_DEC_WIN_SIZE(w)	(MVEBU_CPU_DEC_WIN_CTRL(w) + 0x4)
#define MVEBU_CPU_DEC_WIN_BASE(w)	(MVEBU_CPU_DEC_WIN_CTRL(w) + 0x8)
#define MVEBU_CPU_DEC_WIN_REMAP(w)	(MVEBU_CPU_DEC_WIN_CTRL(w) + 0xc)
#define MVEBU_CPU_DEC_WIN_GRANULARITY	16
#define MVEBU_CPU_DEC_WINS		5
#define MVEBU_CPU_DEC_CCI_BASE		(MVEBU_CPU_DEC_WIN_REG_BASE + 0xe0)
#define MVEBU_CPU_DEC_ROM_BASE		(MVEBU_CPU_DEC_WIN_REG_BASE + 0xf4)

#define MAX_MEM_MAP_REGIONS		(MVEBU_CPU_DEC_WINS + 4)
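/*
 * The "+ 4" leaves room for the fixed MMIO entry in mvebu_mem_map[], the
 * CCI-400 and BootROM entries added in build_mem_map(), and the zero-size
 * list terminator.
 */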

#define A3700_PTE_BLOCK_NORMAL \
	(PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_INNER_SHARE)
#define A3700_PTE_BLOCK_DEVICE \
	(PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE)

DECLARE_GLOBAL_DATA_PTR;

static struct mm_region mvebu_mem_map[MAX_MEM_MAP_REGIONS] = {
	{
		/*
		 * SRAM, MMIO regions
		 * Don't remove this, build_mem_map needs it.
		 */
		.phys = SOC_REGS_PHY_BASE,
		.virt = SOC_REGS_PHY_BASE,
		.size = 0x02000000UL,	/* 32MiB internal registers */
		.attrs = A3700_PTE_BLOCK_DEVICE
	},
};

struct mm_region *mem_map = mvebu_mem_map;
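
/*
 * The common ARMv8 MMU code walks mem_map until it reaches the zero-size
 * terminator entry appended by build_mem_map(), so the map is expected to
 * be built (via a3700_dram_init()) before dcache_enable() runs.
 */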

static int get_cpu_dec_win(int win, u32 *tgt, u32 *base, u32 *size)
{
	u32 reg;

	reg = readl(MVEBU_CPU_DEC_WIN_CTRL(win));
	if (!(reg & MVEBU_CPU_DEC_WIN_CTRL_EN))
		return -1;

	if (tgt) {
		reg >>= MVEBU_CPU_DEC_WIN_CTRL_TGT_OFFS;
		reg &= MVEBU_CPU_DEC_WIN_CTRL_TGT_MASK;
		*tgt = reg;
	}

	if (base) {
		reg = readl(MVEBU_CPU_DEC_WIN_BASE(win));
		*base = reg << MVEBU_CPU_DEC_WIN_GRANULARITY;
	}

	if (size) {
		/*
		 * Window size is encoded as the number of 1s from LSB to MSB,
		 * followed by 0s. The number of 1s specifies the size in 64 KiB
		 * granularity.
		 */
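		/*
		 * Example with this encoding: a SIZE register value of 0x3fff
		 * (fourteen 1s) decodes to (0x3fff + 1) << 16 = 0x40000000,
		 * i.e. a 1 GiB window.
		 */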
		reg = readl(MVEBU_CPU_DEC_WIN_SIZE(win));
		*size = ((reg + 1) << MVEBU_CPU_DEC_WIN_GRANULARITY);
	}

	return 0;
}

/*
 * Builds mem_map according to CPU Address Decoder settings, which were set by
 * the TIMH image on the Cortex-M3 secure processor, or by ARM Trusted Firmware
 */
static void build_mem_map(void)
{
	int win, region;
	u32 reg;

	region = 1;

	/* CCI-400 */
	reg = readl(MVEBU_CPU_DEC_CCI_BASE);
	mvebu_mem_map[region].phys = reg << 20;
	mvebu_mem_map[region].virt = reg << 20;
	mvebu_mem_map[region].size = SZ_64K;
	mvebu_mem_map[region].attrs = A3700_PTE_BLOCK_DEVICE;
	++region;

	/* AP BootROM */
	reg = readl(MVEBU_CPU_DEC_ROM_BASE);
	mvebu_mem_map[region].phys = reg << 20;
	mvebu_mem_map[region].virt = reg << 20;
	mvebu_mem_map[region].size = SZ_1M;
	mvebu_mem_map[region].attrs = A3700_PTE_BLOCK_NORMAL;
	++region;

	for (win = 0; win < MVEBU_CPU_DEC_WINS; ++win) {
		u32 base, tgt, size;
		u64 attrs;

		/* skip disabled windows */
		if (get_cpu_dec_win(win, &tgt, &base, &size))
			continue;

		if (tgt == MVEBU_CPU_DEC_WIN_CTRL_TGT_DRAM)
			attrs = A3700_PTE_BLOCK_NORMAL;
		else if (tgt == MVEBU_CPU_DEC_WIN_CTRL_TGT_PCIE)
			attrs = A3700_PTE_BLOCK_DEVICE;
		else
			/* skip windows with other targets */
			continue;

		mvebu_mem_map[region].phys = base;
		mvebu_mem_map[region].virt = base;
		mvebu_mem_map[region].size = size;
		mvebu_mem_map[region].attrs = attrs;
		++region;
	}

	/* add list terminator */
	mvebu_mem_map[region].size = 0;
	mvebu_mem_map[region].attrs = 0;
}

void enable_caches(void)
{
	icache_enable();
	dcache_enable();
}

int a3700_dram_init(void)
{
	int win;

	build_mem_map();

	gd->ram_size = 0;
	for (win = 0; win < MVEBU_CPU_DEC_WINS; ++win) {
		u32 base, tgt, size;

		/* skip disabled windows */
		if (get_cpu_dec_win(win, &tgt, &base, &size))
			continue;

		/* skip non-DRAM windows */
		if (tgt != MVEBU_CPU_DEC_WIN_CTRL_TGT_DRAM)
			continue;

		/*
		 * It is possible that one image was built for boards with
		 * different RAM sizes, for example 512 MiB and 1 GiB.
		 * We therefore try to determine the actual RAM size in the
		 * window with get_ram_size.
		 */
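		/*
		 * get_ram_size() probes the window non-destructively and
		 * returns the amount of memory that actually responds, so a
		 * 1 GiB window on a 512 MiB board contributes only 512 MiB.
		 */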
		gd->ram_size += get_ram_size((void *)(size_t)base, size);
	}

	return 0;
}

struct a3700_dram_window {
	size_t base, size;
};

static int dram_win_cmp(const void *a, const void *b)
{
	size_t ab, bb;

	ab = ((const struct a3700_dram_window *)a)->base;
	bb = ((const struct a3700_dram_window *)b)->base;

	if (ab < bb)
		return -1;
	else if (ab > bb)
		return 1;
	else
		return 0;
}

int a3700_dram_init_banksize(void)
{
	struct a3700_dram_window dram_wins[MVEBU_CPU_DEC_WINS];
	int bank, win, ndram_wins;
	u32 last_end;
	size_t size;

	ndram_wins = 0;
	for (win = 0; win < MVEBU_CPU_DEC_WINS; ++win) {
		u32 base, tgt, size;

		/* skip disabled windows */
		if (get_cpu_dec_win(win, &tgt, &base, &size))
			continue;

		/* skip non-DRAM windows */
		if (tgt != MVEBU_CPU_DEC_WIN_CTRL_TGT_DRAM)
			continue;

		/* pack enabled DRAM windows contiguously for qsort below */
		dram_wins[ndram_wins].base = base;
		dram_wins[ndram_wins].size = size;
		++ndram_wins;
	}

	qsort(dram_wins, ndram_wins, sizeof(dram_wins[0]), dram_win_cmp);

	bank = 0;
	last_end = -1;

	for (win = 0; win < ndram_wins; ++win) {
		/* again determining actual RAM size as in a3700_dram_init */
		size = get_ram_size((void *)dram_wins[win].base,
				    dram_wins[win].size);

		/*
		 * Check whether the previous window ends where the current one
		 * starts. If so, merge the two windows into one "bank". This
		 * simple check is sufficient because dram_wins was sorted by
		 * base address with qsort above.
		 */
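		/*
		 * Example (hypothetical values): two 512 MiB windows at
		 * 0x00000000 and 0x20000000 are merged into a single 1 GiB
		 * bank starting at 0x00000000.
		 */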
		if (last_end == dram_wins[win].base) {
			gd->bd->bi_dram[bank - 1].size += size;
			last_end += size;
		} else {
			if (bank == CONFIG_NR_DRAM_BANKS) {
				printf("Need more CONFIG_NR_DRAM_BANKS\n");
				return -ENOBUFS;
			}

			gd->bd->bi_dram[bank].start = dram_wins[win].base;
			gd->bd->bi_dram[bank].size = size;
			last_end = dram_wins[win].base + size;
			++bank;
		}
	}

	/*
	 * If there is more room for DRAM bank definitions than needed, fill
	 * the rest with zeros.
	 */
	for (; bank < CONFIG_NR_DRAM_BANKS; ++bank) {
		gd->bd->bi_dram[bank].start = 0;
		gd->bd->bi_dram[bank].size = 0;
	}

	return 0;
}

static u32 find_pcie_window_base(void)
{
	int win;

	for (win = 0; win < MVEBU_CPU_DEC_WINS; ++win) {
		u32 base, tgt;

		/* skip disabled windows */
		if (get_cpu_dec_win(win, &tgt, &base, NULL))
			continue;

		if (tgt == MVEBU_CPU_DEC_WIN_CTRL_TGT_PCIE)
			return base;
	}

	return -1;
}

static int fdt_setprop_inplace_u32_partial(void *blob, int node,
					   const char *name,
					   u32 idx, u32 val)
{
	val = cpu_to_fdt32(val);

	return fdt_setprop_inplace_namelen_partial(blob, node, name,
						   strlen(name),
						   idx * sizeof(u32),
						   &val, sizeof(u32));
}

int a3700_fdt_fix_pcie_regions(void *blob)
{
	u32 base, lowest_cpu_addr, fix_offset;
	int pci_cells, cpu_cells, size_cells;
	const u32 *ranges;
	int node, pnode;
	int ret, i, len;

	base = find_pcie_window_base();
	if (base == -1)
		return -ENOENT;

	node = fdt_node_offset_by_compatible(blob, -1, "marvell,armada-3700-pcie");
	if (node < 0)
		return node;

	ranges = fdt_getprop(blob, node, "ranges", &len);
	if (!ranges || !len || len % sizeof(u32))
		return -EINVAL;

	/*
	 * The "ranges" property is an array of
	 *   { <PCI address> <CPU address> <size in PCI address space> }
	 * where the number of PCI address cells and size cells is stored in
	 * the "#address-cells" and "#size-cells" properties of the node
	 * containing the "ranges" property, and the number of CPU address
	 * cells is stored in the parent node's "#address-cells" property.
	 *
	 * All three elements can span a different number of cells. Fetch them.
	 */
	pnode = fdt_parent_offset(blob, node);
	pci_cells = fdt_address_cells(blob, node);
	cpu_cells = fdt_address_cells(blob, pnode);
	size_cells = fdt_size_cells(blob, node);

	/* PCI addresses always use 3 cells */
	if (pci_cells != 3)
		return -EINVAL;

	/* CPU addresses on Armada 37xx always use 2 cells */
	if (cpu_cells != 2)
		return -EINVAL;

	for (i = 0; i < len / sizeof(u32);
	     i += pci_cells + cpu_cells + size_cells) {
		/*
		 * Parent CPU addresses on Armada 37xx are always 32-bit, so
		 * check that the high word is zero.
		 */
		if (fdt32_to_cpu(ranges[i + pci_cells]))
			return -EINVAL;

		if (i == 0 ||
		    fdt32_to_cpu(ranges[i + pci_cells + 1]) < lowest_cpu_addr)
			lowest_cpu_addr = fdt32_to_cpu(ranges[i + pci_cells + 1]);
	}

	/* Calculate fixup offset from the lowest (first) CPU address */
	fix_offset = base - lowest_cpu_addr;

	/* If fixup offset is zero there is nothing to fix */
	if (!fix_offset)
		return 0;

	/*
	 * Fix each CPU address, and also the corresponding PCI address if it
	 * is not already remapped (i.e. if it equals the CPU address).
	 */
	for (i = 0; i < len / sizeof(u32);
	     i += pci_cells + cpu_cells + size_cells) {
		u32 cpu_addr;
		u64 pci_addr;
		int idx;

		/* Fix CPU address */
		idx = i + pci_cells + cpu_cells - 1;
		cpu_addr = fdt32_to_cpu(ranges[idx]);
		ret = fdt_setprop_inplace_u32_partial(blob, node, "ranges", idx,
						      cpu_addr + fix_offset);
		if (ret)
			return ret;

		/* Fix PCI address only if it isn't remapped (is same as CPU) */
		idx = i + pci_cells - 1;
		pci_addr = ((u64)fdt32_to_cpu(ranges[idx - 1]) << 32) |
			   fdt32_to_cpu(ranges[idx]);
		if (cpu_addr != pci_addr)
			continue;

		ret = fdt_setprop_inplace_u32_partial(blob, node, "ranges", idx,
						      cpu_addr + fix_offset);
		if (ret)
			return ret;
	}

	return 0;
}

void reset_cpu(void)
{
	/*
	 * Write magic number of 0x1d1e to North Bridge Warm Reset register
	 * to trigger warm reset
	 */
	writel(MVEBU_NB_WARM_RST_MAGIC_NUM, MVEBU_NB_WARM_RST_REG);
}

/*
 * get_ref_clk
 *
 * return: reference clock in MHz (25 or 40)
 */
u32 get_ref_clk(void)
{
	u32 regval;

	regval = (readl(MVEBU_TEST_PIN_LATCH_N) & MVEBU_XTAL_MODE_MASK) >>
		 MVEBU_XTAL_MODE_OFFS;

	if (regval == MVEBU_XTAL_CLOCK_25MHZ)
		return 25;
	else
		return 40;
}