// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2013
 * David Feng <fenghua@phytium.com.cn>
 * Sharma Bhupesh <bhupesh.sharma@freescale.com>
 *
 * (C) 2020 EPAM Systems Inc
 */

#include <log.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <xen.h>
#include <asm/global_data.h>
#include <virtio_types.h>
#include <virtio.h>

#include <asm/io.h>
#include <asm/armv8/mmu.h>
#include <asm/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/system.h>

#include <linux/compiler.h>

#include <xen/gnttab.h>
#include <xen/hvm.h>

DECLARE_GLOBAL_DATA_PTR;

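/*
 * Fixed virtio-mmio window of the Xen guest memory layout; these values
 * are assumed to match GUEST_VIRTIO_MMIO_BASE/SIZE from Xen's public
 * arch-arm.h.
 */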
#define GUEST_VIRTIO_MMIO_BASE	0x2000000
#define GUEST_VIRTIO_MMIO_SIZE	0x100000

int board_init(void)
{
	return 0;
}

/*
 * Use the fdt provided by Xen: according to
 * https://www.kernel.org/doc/Documentation/arm64/booting.txt
 * x0 holds the physical address of the device tree blob (dtb) in system RAM.
 * It is saved in rom_pointer during low-level init.
 */
void *board_fdt_blob_setup(int *err)
{
	*err = 0;
	if (fdt_magic(rom_pointer[0]) != FDT_MAGIC) {
		*err = -ENXIO;
		return NULL;
	}
	return (void *)rom_pointer[0];
}

/*
 * MAX_MEM_MAP_REGIONS should account for:
 * 3 Xen related regions
 * 6 regions for 2 PCI host bridges
 * 10 regions for MMIO devices
 * 2 memory regions
 * The sum is 21; the remaining entry stays zero-filled and terminates the
 * list for the MMU code.
 */
#define MAX_MEM_MAP_REGIONS 22
static struct mm_region xen_mem_map[MAX_MEM_MAP_REGIONS];
struct mm_region *mem_map = xen_mem_map;

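/*
 * Return the offset of the next enabled node with device_type = "memory",
 * searching forward from @mem (pass -1 to start at the beginning).
 */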
static int get_next_memory_node(const void *blob, int mem)
{
	do {
		mem = fdt_node_offset_by_prop_value(blob, mem,
						    "device_type", "memory", 7);
	} while (!fdtdec_get_is_enabled(blob, mem));

	return mem;
}

#ifdef CONFIG_VIRTIO_BLK
#ifdef CONFIG_VIRTIO_PCI
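/*
 * Add MMU mappings for every "pci-host-ecam-generic" bridge: the ECAM
 * window from "reg" plus each record of the "ranges" property.  A record
 * is <pci-addr (3 cells)> <cpu-addr (#address-cells)> <size (#size-cells)>,
 * where bits 25:24 of the first PCI address cell encode the space code
 * (0 config, 1 I/O, 2 32-bit memory, 3 64-bit memory).  Illustrative
 * record only, not taken from a real Xen guest device tree:
 *   <0x02000000 0x0 0x10000000  0x0 0x10000000  0x0 0x10000000>
 */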
static void add_pci_mem_map(const void *blob, int *cnt)
{
	struct fdt_resource reg_res;
	int node = -1, len = 0, cells_per_record = 0, max_regions = 0;
	int pci_addr_cells = 0, addr_cells = 0, size_cells = 0;

	while ((node = fdt_node_offset_by_prop_value(blob, node, "compatible",
						     "pci-host-ecam-generic",
						     sizeof("pci-host-ecam-generic"))) >= 0) {
		if ((*cnt) >= MAX_MEM_MAP_REGIONS ||
		    fdt_get_resource(blob, node, "reg", 0, &reg_res) < 0)
			return;

		xen_mem_map[*cnt].virt = reg_res.start;
		xen_mem_map[*cnt].phys = reg_res.start;
		xen_mem_map[*cnt].size = fdt_resource_size(&reg_res);
		xen_mem_map[*cnt].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					   PTE_BLOCK_INNER_SHARE);
		(*cnt)++;

		const u32 *prop = fdt_getprop(blob, node, "ranges", &len);

		if (!prop)
			return;

		pci_addr_cells = fdt_address_cells(blob, node);
		addr_cells = fdt_address_cells(blob, 0);
		size_cells = fdt_size_cells(blob, node);

		/* PCI addresses are always 3 cells wide */
		len /= sizeof(u32);
		cells_per_record = pci_addr_cells + addr_cells + size_cells;
		max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS;

		for (int i = 0; i < max_regions; i++, len -= cells_per_record) {
			u64 pci_addr, addr, size;
			int space_code;
			u32 flags;

			if (((*cnt) >= MAX_MEM_MAP_REGIONS) || len < cells_per_record)
				return;

			flags = fdt32_to_cpu(prop[0]);
			space_code = (flags >> 24) & 3;
			pci_addr = fdtdec_get_number(prop + 1, 2);
			prop += pci_addr_cells;
			addr = fdtdec_get_number(prop, addr_cells);
			prop += addr_cells;
			size = fdtdec_get_number(prop, size_cells);
			prop += size_cells;

			xen_mem_map[*cnt].virt = addr;
			xen_mem_map[*cnt].phys = addr;
			xen_mem_map[*cnt].size = size;
			xen_mem_map[*cnt].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
						   PTE_BLOCK_INNER_SHARE);
			(*cnt)++;
		}
	}
}
#endif

#ifdef CONFIG_VIRTIO_MMIO
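/*
 * Map the register window of every "virtio,mmio" node, rounded up to a
 * whole page, so the virtio-mmio transports are accessible once probed.
 */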
static void add_mmio_mem_map(const void *blob, int *cnt)
{
	int node = -1;
	struct fdt_resource reg_res;

	while ((node = fdt_node_offset_by_prop_value(blob, node, "compatible", "virtio,mmio",
						     sizeof("virtio,mmio"))) >= 0) {
		/* Check the bound on every iteration so the map cannot overflow. */
		if ((*cnt) >= MAX_MEM_MAP_REGIONS ||
		    fdt_get_resource(blob, node, "reg", 0, &reg_res) < 0)
			return;
		xen_mem_map[*cnt].virt = reg_res.start;
		xen_mem_map[*cnt].phys = reg_res.start;
		xen_mem_map[*cnt].size = roundup(fdt_resource_size(&reg_res), PAGE_SIZE);
		xen_mem_map[*cnt].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					   PTE_BLOCK_INNER_SHARE);
		(*cnt)++;
	}
}
#endif
#endif

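/*
 * Build the MMU memory map for the guest: the Xen console, xenstore and
 * grant table pages, the optional virtio-mmio window, all RAM banks from
 * the device tree and, if enabled, the virtio PCI/MMIO device regions.
 */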
static int setup_mem_map(void)
{
	int i = 0, ret, mem, reg = 0;
	struct fdt_resource res;
	const void *blob = gd->fdt_blob;
	u64 gfn;
	phys_addr_t gnttab_base;
	phys_size_t gnttab_sz;

	memset(xen_mem_map, 0, sizeof(xen_mem_map));
	/*
	 * Add the "magic" regions Xen uses to provide essentials to the
	 * guest: the console and xenstore pages.
	 */
	ret = hvm_get_parameter_maintain_dcache(HVM_PARAM_CONSOLE_PFN, &gfn);
	if (ret < 0) {
		printf("%s: Can't get HVM_PARAM_CONSOLE_PFN, ret %d\n",
		       __func__, ret);
		return -EINVAL;
	}

	xen_mem_map[i].virt = PFN_PHYS(gfn);
	xen_mem_map[i].phys = PFN_PHYS(gfn);
	xen_mem_map[i].size = PAGE_SIZE;
	xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_INNER_SHARE);
	i++;

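	/* The xenstore page is advertised through an HVM parameter as well. */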
	ret = hvm_get_parameter_maintain_dcache(HVM_PARAM_STORE_PFN, &gfn);
	if (ret < 0) {
		printf("%s: Can't get HVM_PARAM_STORE_PFN, ret %d\n",
		       __func__, ret);
		return -EINVAL;
	}

	xen_mem_map[i].virt = PFN_PHYS(gfn);
	xen_mem_map[i].phys = PFN_PHYS(gfn);
	xen_mem_map[i].size = PAGE_SIZE;
	xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_INNER_SHARE);
	i++;

	/* Get Xen's suggested physical page assignments for the grant table. */
	get_gnttab_base(&gnttab_base, &gnttab_sz);

	xen_mem_map[i].virt = gnttab_base;
	xen_mem_map[i].phys = gnttab_base;
	xen_mem_map[i].size = gnttab_sz;
	xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_INNER_SHARE);
	i++;

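	/*
	 * Map the fixed guest virtio-mmio window as strongly-ordered device
	 * memory so virtio transports placed there by Xen can be accessed.
	 */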
	if (CONFIG_IS_ENABLED(VIRTIO_MMIO)) {
		xen_mem_map[i].virt = GUEST_VIRTIO_MMIO_BASE;
		xen_mem_map[i].phys = GUEST_VIRTIO_MMIO_BASE;
		xen_mem_map[i].size = GUEST_VIRTIO_MMIO_SIZE;
		xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
					PTE_BLOCK_NON_SHARE);
		i++;
	}

	mem = get_next_memory_node(blob, -1);
	if (mem < 0) {
		printf("%s: Missing /memory node\n", __func__);
		return -EINVAL;
	}

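	/* Map every "reg" range of all enabled /memory nodes as cached RAM. */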
	for (; i < MAX_MEM_MAP_REGIONS; i++) {
		if (CONFIG_IS_ENABLED(VIRTIO_MMIO)) {
			ret = fdt_node_check_compatible(blob, mem, "virtio,mmio");
			if (!ret)
				continue;
		}
		ret = fdt_get_resource(blob, mem, "reg", reg++, &res);
		if (ret == -FDT_ERR_NOTFOUND) {
			reg = 0;
			mem = get_next_memory_node(blob, mem);
			if (mem == -FDT_ERR_NOTFOUND)
				break;

			ret = fdt_get_resource(blob, mem, "reg", reg++, &res);
			if (ret == -FDT_ERR_NOTFOUND)
				break;
		}
		if (ret != 0) {
			printf("No reg property for memory node\n");
			return -EINVAL;
		}

		xen_mem_map[i].virt = (phys_addr_t)res.start;
		xen_mem_map[i].phys = (phys_addr_t)res.start;
		xen_mem_map[i].size = (phys_size_t)(res.end - res.start + 1);
		xen_mem_map[i].attrs = (PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_INNER_SHARE);
	}
#ifdef CONFIG_VIRTIO_BLK
#ifdef CONFIG_VIRTIO_PCI
	add_pci_mem_map(blob, &i);
#endif
#ifdef CONFIG_VIRTIO_MMIO
	add_mmio_mem_map(blob, &i);
#endif
#endif
	return 0;
}

void enable_caches(void)
{
	/* Re-setup the memory map as BSS gets cleared after relocation. */
	setup_mem_map();
	icache_enable();
	dcache_enable();
}

/* Read memory settings from the Xen-provided device tree. */
int dram_init(void)
{
	int ret;

	ret = fdtdec_setup_mem_size_base();
	if (ret < 0)
		return ret;
	/* Set up the memory map so the MMU page table size can be estimated. */
	return setup_mem_map();
}

int dram_init_banksize(void)
{
	return fdtdec_setup_memory_banksize();
}

/*
 * Board-specific reset: for this board it is the same as a system reset,
 * so there is nothing to do here.
 */
void reset_cpu(void)
{
}

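/* No device tree fixups are needed for a generic Xen guest. */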
int ft_system_setup(void *blob, struct bd_info *bd)
{
	return 0;
}

int ft_board_setup(void *blob, struct bd_info *bd)
{
	return 0;
}

int print_cpuinfo(void)
{
	printf("Xen virtual CPU\n");
	return 0;
}

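/*
 * Tear down the Xen interfaces (e.g. grant table and event channels)
 * before handing control to the OS.
 */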
void board_cleanup_before_linux(void)
{
	xen_fini();
}