blob: 244070a242da1452dc3e160c5655eeb9d327a1a5 [file] [log] [blame]
Andrii Anisov355d1e42020-08-06 12:42:47 +03001// SPDX-License-Identifier: GPL-2.0
2/*
3 * (C) 2013
4 * David Feng <fenghua@phytium.com.cn>
5 * Sharma Bhupesh <bhupesh.sharma@freescale.com>
6 *
7 * (C) 2020 EPAM Systems Inc
8 */
9
10#include <common.h>
Andrii Chepurnyi2b6c9b52023-10-03 08:58:28 +000011#include <log.h>
Andrii Anisov355d1e42020-08-06 12:42:47 +030012#include <cpu_func.h>
13#include <dm.h>
14#include <errno.h>
15#include <malloc.h>
Oleksandr Andrushchenko10fa5362020-08-06 12:43:00 +030016#include <xen.h>
Simon Glass3ba929a2020-10-30 21:38:53 -060017#include <asm/global_data.h>
Andrii Chepurnyi2b6c9b52023-10-03 08:58:28 +000018#include <virtio_types.h>
19#include <virtio.h>
Andrii Anisov355d1e42020-08-06 12:42:47 +030020
21#include <asm/io.h>
22#include <asm/armv8/mmu.h>
23#include <asm/xen.h>
24#include <asm/xen/hypercall.h>
Peng Fan8162f8f2020-08-06 12:42:50 +030025#include <asm/xen/system.h>
Andrii Anisov355d1e42020-08-06 12:42:47 +030026
27#include <linux/compiler.h>
28
Oleksandr Andrushchenko3cc1dcc2020-08-06 12:42:54 +030029#include <xen/gnttab.h>
Peng Fan8162f8f2020-08-06 12:42:50 +030030#include <xen/hvm.h>
31
Andrii Anisov355d1e42020-08-06 12:42:47 +030032DECLARE_GLOBAL_DATA_PTR;
33
/*
 * board_init() - board-specific initialization.
 *
 * Nothing is required for a Xen guest; all setup happens via the
 * device tree and hypervisor parameters elsewhere in this file.
 *
 * Return: always 0 (success).
 */
int board_init(void)
{
	return 0;
}
38
39/*
40 * Use fdt provided by Xen: according to
41 * https://www.kernel.org/doc/Documentation/arm64/booting.txt
42 * x0 is the physical address of the device tree blob (dtb) in system RAM.
43 * This is stored in rom_pointer during low level init.
44 */
Ilias Apalodimasab5348a2021-10-26 09:12:33 +030045void *board_fdt_blob_setup(int *err)
Andrii Anisov355d1e42020-08-06 12:42:47 +030046{
Ilias Apalodimasab5348a2021-10-26 09:12:33 +030047 *err = 0;
48 if (fdt_magic(rom_pointer[0]) != FDT_MAGIC) {
49 *err = -ENXIO;
Andrii Anisov355d1e42020-08-06 12:42:47 +030050 return NULL;
Ilias Apalodimasab5348a2021-10-26 09:12:33 +030051 }
Andrii Anisov355d1e42020-08-06 12:42:47 +030052 return (void *)rom_pointer[0];
53}
54
/*
 * MAX_MEM_MAP_REGIONS must be large enough to hold:
 *  3 Xen-specific regions (console, xenstore, grant table)
 *  6 regions for 2 PCI host bridges
 * 10 regions for MMIO devices
 *  2 memory regions
 */
#define MAX_MEM_MAP_REGIONS 22
Andrii Anisov355d1e42020-08-06 12:42:47 +030063static struct mm_region xen_mem_map[MAX_MEM_MAP_REGIONS];
64struct mm_region *mem_map = xen_mem_map;
65
/*
 * get_next_memory_node() - find the next enabled memory node in the fdt.
 *
 * Searches @blob for the next node after offset @mem whose device_type
 * property is "memory" and which is not disabled.
 *
 * Return: node offset on success, negative libfdt error when exhausted.
 */
static int get_next_memory_node(const void *blob, int mem)
{
	for (;;) {
		mem = fdt_node_offset_by_prop_value(blob, mem,
						    "device_type", "memory", 7);
		if (fdtdec_get_is_enabled(blob, mem))
			return mem;
	}
}
Andrii Chepurnyi2b6c9b52023-10-03 08:58:28 +000075
76#ifdef CONFIG_VIRTIO_BLK
77#ifdef CONFIG_VIRTIO_PCI
/*
 * add_pci_mem_map() - add map entries for every generic ECAM PCI host bridge.
 *
 * For each "pci-host-ecam-generic" node, adds one xen_mem_map[] entry for
 * the ECAM configuration space ("reg") plus one entry per record of the
 * "ranges" property, so virtio-pci devices behind the bridge can be
 * reached. @cnt is the next free index in xen_mem_map[] and is advanced
 * for every entry added; the walk stops silently once the map is full.
 */
static void add_pci_mem_map(const void *blob, int *cnt)
{
	struct fdt_resource reg_res;
	int node = -1, len = 0, cells_per_record = 0, max_regions = 0;
	int pci_addr_cells = 0, addr_cells = 0, size_cells = 0;

	while ((node = fdt_node_offset_by_prop_value(blob, node, "compatible",
						     "pci-host-ecam-generic",
						     sizeof("pci-host-ecam-generic"))) >= 0) {
		/* Stop when the map is full or the node has no "reg". */
		if ((*cnt) >= MAX_MEM_MAP_REGIONS ||
		    fdt_get_resource(blob, node, "reg", 0, &reg_res) < 0)
			return;

		/* Map the ECAM configuration space itself. */
		xen_mem_map[*cnt].virt = reg_res.start;
		xen_mem_map[*cnt].phys = reg_res.start;
		xen_mem_map[*cnt].size = fdt_resource_size(&reg_res);
		xen_mem_map[*cnt].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					   PTE_BLOCK_INNER_SHARE);
		(*cnt)++;

		const u32 *prop = fdt_getprop(blob, node, "ranges", &len);

		if (!prop)
			return;

		pci_addr_cells = fdt_address_cells(blob, node);
		addr_cells = fdt_address_cells(blob, 0);
		size_cells = fdt_size_cells(blob, node);

		/* PCI addresses are always 3-cells */
		len /= sizeof(u32);
		cells_per_record = pci_addr_cells + addr_cells + size_cells;
		/*
		 * NOTE(review): "+ CONFIG_NR_DRAM_BANKS" looks like slack on
		 * top of the record count; the per-iteration len check below
		 * is what actually bounds the walk — confirm intent.
		 */
		max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS;

		for (int i = 0; i < max_regions; i++, len -= cells_per_record) {
			u64 pci_addr, addr, size;
			int space_code;
			u32 flags;

			if (((*cnt) >= MAX_MEM_MAP_REGIONS) || len < cells_per_record)
				return;

			/*
			 * Decode one ranges record <pci-addr cpu-addr size>.
			 * space_code and pci_addr are decoded but not used:
			 * every range is mapped regardless of its space type.
			 */
			flags = fdt32_to_cpu(prop[0]);
			space_code = (flags >> 24) & 3;
			pci_addr = fdtdec_get_number(prop + 1, 2);
			prop += pci_addr_cells;
			addr = fdtdec_get_number(prop, addr_cells);
			prop += addr_cells;
			size = fdtdec_get_number(prop, size_cells);
			prop += size_cells;

			/* Map the CPU-visible side of the range. */
			xen_mem_map[*cnt].virt = addr;
			xen_mem_map[*cnt].phys = addr;
			xen_mem_map[*cnt].size = size;
			xen_mem_map[*cnt].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
						   PTE_BLOCK_INNER_SHARE);
			(*cnt)++;
		}
	}
}
138#endif
139
140#ifdef CONFIG_VIRTIO_MMIO
141static void add_mmio_mem_map(const void *blob, int *cnt)
142{
143 int node = -1;
144 struct fdt_resource reg_res;
145
146 if ((*cnt) >= MAX_MEM_MAP_REGIONS)
147 return;
148 while ((node = fdt_node_offset_by_prop_value(blob, node, "compatible", "virtio,mmio",
149 sizeof("virtio,mmio"))) >= 0) {
150 if (fdt_get_resource(blob, node, "reg", 0, &reg_res) < 0)
151 return;
152 xen_mem_map[*cnt].virt = reg_res.start;
153 xen_mem_map[*cnt].phys = reg_res.start;
154 xen_mem_map[*cnt].size = roundup(fdt_resource_size(&reg_res), PAGE_SIZE);
155 xen_mem_map[*cnt].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
156 PTE_BLOCK_INNER_SHARE);
157 (*cnt)++;
158 }
159}
160#endif
161#endif
Andrii Anisov355d1e42020-08-06 12:42:47 +0300162
/*
 * setup_mem_map() - populate xen_mem_map[], this board's MMU region list.
 *
 * Entries are appended in order:
 *   1. the Xen console page (HVM_PARAM_CONSOLE_PFN)
 *   2. the xenstore page (HVM_PARAM_STORE_PFN)
 *   3. the grant table region suggested by Xen
 *   4. one entry per "reg" range of every enabled /memory node
 * followed, when virtio is configured, by PCI/MMIO device regions.
 *
 * Return: 0 on success, -EINVAL if a required Xen parameter or the
 * /memory node cannot be obtained.
 */
static int setup_mem_map(void)
{
	int i = 0, ret, mem, reg = 0;
	struct fdt_resource res;
	const void *blob = gd->fdt_blob;
	u64 gfn;
	phys_addr_t gnttab_base;
	phys_size_t gnttab_sz;

	/*
	 * Called again after relocation (see enable_caches()), so start
	 * from a clean map each time.
	 */
	memset(xen_mem_map, 0, sizeof(xen_mem_map));
	/*
	 * Add "magic" region which is used by Xen to provide some essentials
	 * for the guest: we need console and xenstore.
	 */
	ret = hvm_get_parameter_maintain_dcache(HVM_PARAM_CONSOLE_PFN, &gfn);
	if (ret < 0) {
		printf("%s: Can't get HVM_PARAM_CONSOLE_PFN, ret %d\n",
		       __func__, ret);
		return -EINVAL;
	}

	/* Console page: one page of normal, inner-shareable memory. */
	xen_mem_map[i].virt = PFN_PHYS(gfn);
	xen_mem_map[i].phys = PFN_PHYS(gfn);
	xen_mem_map[i].size = PAGE_SIZE;
	xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_INNER_SHARE);
	i++;

	ret = hvm_get_parameter_maintain_dcache(HVM_PARAM_STORE_PFN, &gfn);
	if (ret < 0) {
		printf("%s: Can't get HVM_PARAM_STORE_PFN, ret %d\n",
		       __func__, ret);
		return -EINVAL;
	}

	/* Xenstore page. */
	xen_mem_map[i].virt = PFN_PHYS(gfn);
	xen_mem_map[i].phys = PFN_PHYS(gfn);
	xen_mem_map[i].size = PAGE_SIZE;
	xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_INNER_SHARE);
	i++;

	/* Get Xen's suggested physical page assignments for the grant table. */
	get_gnttab_base(&gnttab_base, &gnttab_sz);

	xen_mem_map[i].virt = gnttab_base;
	xen_mem_map[i].phys = gnttab_base;
	xen_mem_map[i].size = gnttab_sz;
	xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_INNER_SHARE);
	i++;

	mem = get_next_memory_node(blob, -1);
	if (mem < 0) {
		printf("%s: Missing /memory node\n", __func__);
		return -EINVAL;
	}

	/*
	 * Walk every "reg" range of every enabled memory node until the
	 * nodes are exhausted or the map is full.
	 */
	for (; i < MAX_MEM_MAP_REGIONS; i++) {
		ret = fdt_get_resource(blob, mem, "reg", reg++, &res);
		if (ret == -FDT_ERR_NOTFOUND) {
			/* Ranges exhausted in this node: try the next one. */
			reg = 0;
			mem = get_next_memory_node(blob, mem);
			if (mem == -FDT_ERR_NOTFOUND)
				break;

			ret = fdt_get_resource(blob, mem, "reg", reg++, &res);
			if (ret == -FDT_ERR_NOTFOUND)
				break;
		}
		if (ret != 0) {
			printf("No reg property for memory node\n");
			return -EINVAL;
		}

		xen_mem_map[i].virt = (phys_addr_t)res.start;
		xen_mem_map[i].phys = (phys_addr_t)res.start;
		xen_mem_map[i].size = (phys_size_t)(res.end - res.start + 1);
		xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_INNER_SHARE);
	}
#ifdef CONFIG_VIRTIO_BLK
#ifdef CONFIG_VIRTIO_PCI
	add_pci_mem_map(blob, &i);
#endif
#ifdef CONFIG_VIRTIO_MMIO
	add_mmio_mem_map(blob, &i);
#endif
#endif
	return 0;
}
254
/*
 * enable_caches() - rebuild the memory map and turn on I/D caches.
 *
 * The map must be set up again before enabling the dcache because BSS
 * (where xen_mem_map lives) is cleared after relocation.
 */
void enable_caches(void)
{
	/* Re-setup the memory map as BSS gets cleared after relocation. */
	setup_mem_map();
	icache_enable();
	dcache_enable();
}
262
/*
 * dram_init() - read memory settings from the Xen provided device tree
 * and build the memory map so the MMU page table size can be estimated.
 *
 * Return: 0 on success, negative error from fdtdec or setup_mem_map().
 */
int dram_init(void)
{
	int err = fdtdec_setup_mem_size_base();

	if (err < 0)
		return err;

	return setup_mem_map();
}
274
/*
 * dram_init_banksize() - fill gd->bd memory banks from the device tree.
 *
 * Return: 0 on success, negative fdtdec error otherwise.
 */
int dram_init_banksize(void)
{
	return fdtdec_setup_memory_banksize();
}
279
/*
 * Board specific reset that is system reset.
 *
 * Intentionally empty: a Xen guest has no CPU-level reset of its own
 * to perform here.
 */
void reset_cpu(void)
{
}
286
/*
 * ft_system_setup() - system-level fdt fixups before OS boot.
 *
 * No fixups are needed for a Xen guest.
 *
 * Return: always 0.
 */
int ft_system_setup(void *blob, struct bd_info *bd)
{
	return 0;
}
291
/*
 * ft_board_setup() - board-level fdt fixups before OS boot.
 *
 * No fixups are needed for a Xen guest.
 *
 * Return: always 0.
 */
int ft_board_setup(void *blob, struct bd_info *bd)
{
	return 0;
}
296
/*
 * print_cpuinfo() - report the (virtual) CPU type at boot.
 *
 * Return: always 0.
 */
int print_cpuinfo(void)
{
	printf("Xen virtual CPU\n");
	return 0;
}
302
/*
 * board_cleanup_before_linux() - tear down Xen facilities before
 * handing control to the OS.
 */
void board_cleanup_before_linux(void)
{
	xen_fini();
}