// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2013
 * David Feng <fenghua@phytium.com.cn>
 * Sharma Bhupesh <bhupesh.sharma@freescale.com>
 *
 * (C) 2020 EPAM Systems Inc
 */

#include <common.h>
#include <log.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <xen.h>
#include <asm/global_data.h>
#include <virtio_types.h>
#include <virtio.h>

#include <asm/io.h>
#include <asm/armv8/mmu.h>
#include <asm/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/system.h>

#include <linux/compiler.h>

#include <xen/gnttab.h>
#include <xen/hvm.h>

DECLARE_GLOBAL_DATA_PTR;

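/*
 * Assumed to match the fixed virtio-mmio window of the Xen guest memory
 * layout (GUEST_VIRTIO_MMIO_BASE/SIZE in Xen's public/arch-arm.h).
 */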
#define GUEST_VIRTIO_MMIO_BASE	0x2000000
#define GUEST_VIRTIO_MMIO_SIZE	0x100000

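/* Nothing board-specific to initialize for a Xen guest. */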
Andrii Anisov | 355d1e4 | 2020-08-06 12:42:47 +0300 | [diff] [blame] | 37 | int board_init(void) |
| 38 | { |
| 39 | return 0; |
| 40 | } |
| 41 | |
| 42 | /* |
| 43 | * Use fdt provided by Xen: according to |
| 44 | * https://www.kernel.org/doc/Documentation/arm64/booting.txt |
| 45 | * x0 is the physical address of the device tree blob (dtb) in system RAM. |
| 46 | * This is stored in rom_pointer during low level init. |
| 47 | */ |
Ilias Apalodimas | ab5348a | 2021-10-26 09:12:33 +0300 | [diff] [blame] | 48 | void *board_fdt_blob_setup(int *err) |
Andrii Anisov | 355d1e4 | 2020-08-06 12:42:47 +0300 | [diff] [blame] | 49 | { |
Ilias Apalodimas | ab5348a | 2021-10-26 09:12:33 +0300 | [diff] [blame] | 50 | *err = 0; |
| 51 | if (fdt_magic(rom_pointer[0]) != FDT_MAGIC) { |
| 52 | *err = -ENXIO; |
Andrii Anisov | 355d1e4 | 2020-08-06 12:42:47 +0300 | [diff] [blame] | 53 | return NULL; |
Ilias Apalodimas | ab5348a | 2021-10-26 09:12:33 +0300 | [diff] [blame] | 54 | } |
Andrii Anisov | 355d1e4 | 2020-08-06 12:42:47 +0300 | [diff] [blame] | 55 | return (void *)rom_pointer[0]; |
| 56 | } |
| 57 | |
/*
 * MAX_MEM_MAP_REGIONS should account for:
 * 3 Xen-related regions (console, xenstore, grant table)
 * 6 regions for 2 PCI host bridges
 * 10 regions for MMIO devices
 * 2 memory regions
 * 1 zeroed entry to terminate the list
 */
#define MAX_MEM_MAP_REGIONS 22
static struct mm_region xen_mem_map[MAX_MEM_MAP_REGIONS];
struct mm_region *mem_map = xen_mem_map;

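/* Return the offset of the next enabled node with device_type = "memory". */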
static int get_next_memory_node(const void *blob, int mem)
{
	do {
		mem = fdt_node_offset_by_prop_value(blob, mem,
						    "device_type", "memory", 7);
	} while (!fdtdec_get_is_enabled(blob, mem));

	return mem;
}

#ifdef CONFIG_VIRTIO_BLK
#ifdef CONFIG_VIRTIO_PCI
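/*
 * Map every "pci-host-ecam-generic" host bridge: first its ECAM ("reg")
 * window, then each window described by its "ranges" property.
 */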
static void add_pci_mem_map(const void *blob, int *cnt)
{
	struct fdt_resource reg_res;
	int node = -1, len = 0, cells_per_record = 0, max_regions = 0;
	int pci_addr_cells = 0, addr_cells = 0, size_cells = 0;

	while ((node = fdt_node_offset_by_prop_value(blob, node, "compatible",
						     "pci-host-ecam-generic",
						     sizeof("pci-host-ecam-generic"))) >= 0) {
		if ((*cnt) >= MAX_MEM_MAP_REGIONS ||
		    fdt_get_resource(blob, node, "reg", 0, &reg_res) < 0)
			return;

		xen_mem_map[*cnt].virt = reg_res.start;
		xen_mem_map[*cnt].phys = reg_res.start;
		xen_mem_map[*cnt].size = fdt_resource_size(&reg_res);
		xen_mem_map[*cnt].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					   PTE_BLOCK_INNER_SHARE);
		(*cnt)++;

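		/*
		 * Each "ranges" record is <PCI address, CPU address, size>;
		 * add a map entry for the CPU view of every window.
		 */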
		const u32 *prop = fdt_getprop(blob, node, "ranges", &len);

		if (!prop)
			return;

		pci_addr_cells = fdt_address_cells(blob, node);
		addr_cells = fdt_address_cells(blob, 0);
		size_cells = fdt_size_cells(blob, node);

		/* PCI addresses are always 3-cells */
		len /= sizeof(u32);
		cells_per_record = pci_addr_cells + addr_cells + size_cells;
		max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS;

		for (int i = 0; i < max_regions; i++, len -= cells_per_record) {
			u64 pci_addr, addr, size;
			int space_code;
			u32 flags;

			if (((*cnt) >= MAX_MEM_MAP_REGIONS) || len < cells_per_record)
				return;

			flags = fdt32_to_cpu(prop[0]);
			space_code = (flags >> 24) & 3;
			pci_addr = fdtdec_get_number(prop + 1, 2);
			prop += pci_addr_cells;
			addr = fdtdec_get_number(prop, addr_cells);
			prop += addr_cells;
			size = fdtdec_get_number(prop, size_cells);
			prop += size_cells;

			xen_mem_map[*cnt].virt = addr;
			xen_mem_map[*cnt].phys = addr;
			xen_mem_map[*cnt].size = size;
			xen_mem_map[*cnt].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
						   PTE_BLOCK_INNER_SHARE);
			(*cnt)++;
		}
	}
}
#endif

#ifdef CONFIG_VIRTIO_MMIO
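/* Map every "virtio,mmio" device region, rounded up to a full page. */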
static void add_mmio_mem_map(const void *blob, int *cnt)
{
	int node = -1;
	struct fdt_resource reg_res;

	while ((node = fdt_node_offset_by_prop_value(blob, node, "compatible", "virtio,mmio",
						     sizeof("virtio,mmio"))) >= 0) {
		if ((*cnt) >= MAX_MEM_MAP_REGIONS ||
		    fdt_get_resource(blob, node, "reg", 0, &reg_res) < 0)
			return;

		xen_mem_map[*cnt].virt = reg_res.start;
		xen_mem_map[*cnt].phys = reg_res.start;
		xen_mem_map[*cnt].size = roundup(fdt_resource_size(&reg_res), PAGE_SIZE);
		xen_mem_map[*cnt].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					   PTE_BLOCK_INNER_SHARE);
		(*cnt)++;
	}
}
#endif
#endif

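/*
 * Build the MMU memory map: the Xen "magic" pages (console, xenstore), the
 * grant table region, the optional virtio-mmio window and the RAM banks
 * described by the device tree, plus the PCI/MMIO regions above.
 */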
static int setup_mem_map(void)
{
	int i = 0, ret, mem, reg = 0;
	struct fdt_resource res;
	const void *blob = gd->fdt_blob;
	u64 gfn;
	phys_addr_t gnttab_base;
	phys_size_t gnttab_sz;

	memset(xen_mem_map, 0, sizeof(xen_mem_map));
	/*
	 * Add the "magic" regions used by Xen to provide essentials for the
	 * guest: the console and xenstore pages.
	 */
	ret = hvm_get_parameter_maintain_dcache(HVM_PARAM_CONSOLE_PFN, &gfn);
	if (ret < 0) {
		printf("%s: Can't get HVM_PARAM_CONSOLE_PFN, ret %d\n",
		       __func__, ret);
		return -EINVAL;
	}

	xen_mem_map[i].virt = PFN_PHYS(gfn);
	xen_mem_map[i].phys = PFN_PHYS(gfn);
	xen_mem_map[i].size = PAGE_SIZE;
	xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_INNER_SHARE);
	i++;

	ret = hvm_get_parameter_maintain_dcache(HVM_PARAM_STORE_PFN, &gfn);
	if (ret < 0) {
		printf("%s: Can't get HVM_PARAM_STORE_PFN, ret %d\n",
		       __func__, ret);
		return -EINVAL;
	}

	xen_mem_map[i].virt = PFN_PHYS(gfn);
	xen_mem_map[i].phys = PFN_PHYS(gfn);
	xen_mem_map[i].size = PAGE_SIZE;
	xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_INNER_SHARE);
	i++;

	/* Get Xen's suggested physical page assignments for the grant table. */
	get_gnttab_base(&gnttab_base, &gnttab_sz);

	xen_mem_map[i].virt = gnttab_base;
	xen_mem_map[i].phys = gnttab_base;
	xen_mem_map[i].size = gnttab_sz;
	xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_INNER_SHARE);
	i++;

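	/* Map the fixed virtio-mmio window as non-cacheable device memory. */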
	if (CONFIG_IS_ENABLED(VIRTIO_MMIO)) {
		xen_mem_map[i].virt = GUEST_VIRTIO_MMIO_BASE;
		xen_mem_map[i].phys = GUEST_VIRTIO_MMIO_BASE;
		xen_mem_map[i].size = GUEST_VIRTIO_MMIO_SIZE;
		xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
					PTE_BLOCK_NON_SHARE);
		i++;
	}

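	/* Add the RAM banks described by the /memory node(s). */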
	mem = get_next_memory_node(blob, -1);
	if (mem < 0) {
		printf("%s: Missing /memory node\n", __func__);
		return -EINVAL;
	}

	for (; i < MAX_MEM_MAP_REGIONS; i++) {
		if (CONFIG_IS_ENABLED(VIRTIO_MMIO)) {
			ret = fdt_node_check_compatible(blob, mem, "virtio,mmio");
			if (!ret)
				continue;
		}
		ret = fdt_get_resource(blob, mem, "reg", reg++, &res);
		if (ret == -FDT_ERR_NOTFOUND) {
			reg = 0;
			mem = get_next_memory_node(blob, mem);
			if (mem == -FDT_ERR_NOTFOUND)
				break;

			ret = fdt_get_resource(blob, mem, "reg", reg++, &res);
			if (ret == -FDT_ERR_NOTFOUND)
				break;
		}
		if (ret != 0) {
			printf("No reg property for memory node\n");
			return -EINVAL;
		}

		xen_mem_map[i].virt = (phys_addr_t)res.start;
		xen_mem_map[i].phys = (phys_addr_t)res.start;
		xen_mem_map[i].size = (phys_size_t)(res.end - res.start + 1);
		xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_INNER_SHARE);
	}
#ifdef CONFIG_VIRTIO_BLK
#ifdef CONFIG_VIRTIO_PCI
	add_pci_mem_map(blob, &i);
#endif
#ifdef CONFIG_VIRTIO_MMIO
	add_mmio_mem_map(blob, &i);
#endif
#endif
	return 0;
}

void enable_caches(void)
{
	/* Re-setup the memory map as BSS gets cleared after relocation. */
	setup_mem_map();
	icache_enable();
	dcache_enable();
}

/* Read memory settings from the Xen-provided device tree. */
int dram_init(void)
{
	int ret;

	ret = fdtdec_setup_mem_size_base();
	if (ret < 0)
		return ret;
	/* Set up the memory map so the MMU page-table size can be estimated. */
	return setup_mem_map();
}

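/* Populate the board-info memory banks from the device tree. */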
int dram_init_banksize(void)
{
	return fdtdec_setup_memory_banksize();
}

/*
 * Board-specific CPU reset: nothing to do here for a Xen guest.
 */
void reset_cpu(void)
{
}

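/* No device tree fix-ups are needed before booting the OS. */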
int ft_system_setup(void *blob, struct bd_info *bd)
{
	return 0;
}

int ft_board_setup(void *blob, struct bd_info *bd)
{
	return 0;
}

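/* Identify the (virtual) CPU for the boot banner. */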
int print_cpuinfo(void)
{
	printf("Xen virtual CPU\n");
	return 0;
}

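/* Shut down the Xen interfaces set up by U-Boot before handing control to the OS. */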
void board_cleanup_before_linux(void)
{
	xen_fini();
}