Alexander Graf | 8623f92 | 2016-03-04 01:10:04 +0100 | [diff] [blame] | 1 | /* |
| 2 | * EFI application memory management |
| 3 | * |
| 4 | * Copyright (c) 2016 Alexander Graf |
| 5 | * |
| 6 | * SPDX-License-Identifier: GPL-2.0+ |
| 7 | */ |
| 8 | |
| 9 | /* #define DEBUG_EFI */ |
| 10 | |
| 11 | #include <common.h> |
| 12 | #include <efi_loader.h> |
| 13 | #include <malloc.h> |
| 14 | #include <asm/global_data.h> |
| 15 | #include <libfdt_env.h> |
Alexander Graf | de2a13b | 2016-03-30 16:38:29 +0200 | [diff] [blame] | 16 | #include <linux/list_sort.h> |
Alexander Graf | 8623f92 | 2016-03-04 01:10:04 +0100 | [diff] [blame] | 17 | #include <inttypes.h> |
| 18 | #include <watchdog.h> |
| 19 | |
| 20 | DECLARE_GLOBAL_DATA_PTR; |
| 21 | |
/*
 * One entry of the EFI memory map: list linkage plus the UEFI memory
 * descriptor it carries. Entries live on the global efi_mem list.
 */
struct efi_mem_list {
	struct list_head link;
	struct efi_mem_desc desc;
};

/* This list contains all memory map items */
LIST_HEAD(efi_mem);

#ifdef CONFIG_EFI_LOADER_BOUNCE_BUFFER
/* 32bit-addressable scratch region, allocated in efi_memory_init() */
void *efi_bounce_buffer;
#endif
| 33 | |
Alexander Graf | 8623f92 | 2016-03-04 01:10:04 +0100 | [diff] [blame] | 34 | /* |
Alexander Graf | de2a13b | 2016-03-30 16:38:29 +0200 | [diff] [blame] | 35 | * Sorts the memory list from highest address to lowest address |
| 36 | * |
| 37 | * When allocating memory we should always start from the highest |
| 38 | * address chunk, so sort the memory list such that the first list |
| 39 | * iterator gets the highest address and goes lower from there. |
| 40 | */ |
| 41 | static int efi_mem_cmp(void *priv, struct list_head *a, struct list_head *b) |
| 42 | { |
| 43 | struct efi_mem_list *mema = list_entry(a, struct efi_mem_list, link); |
| 44 | struct efi_mem_list *memb = list_entry(b, struct efi_mem_list, link); |
| 45 | |
| 46 | if (mema->desc.physical_start == memb->desc.physical_start) |
| 47 | return 0; |
| 48 | else if (mema->desc.physical_start < memb->desc.physical_start) |
| 49 | return 1; |
| 50 | else |
| 51 | return -1; |
| 52 | } |
| 53 | |
/* Re-sort the global memory map, descending by address (see efi_mem_cmp) */
static void efi_mem_sort(void)
{
	list_sort(NULL, &efi_mem, efi_mem_cmp);
}
| 58 | |
/*
 * Unmaps all memory occupied by the carve_desc region from the
 * list entry pointed to by map.
 *
 * Returns 1 if carving was performed or 0 if the regions don't overlap.
 * Returns -1 if it would affect non-RAM regions but overlap_only_ram is set.
 * Carving is only guaranteed to complete when all regions return 0.
 *
 * The caller must restart its list walk after any non-zero return: this
 * function may unlink map or insert a new entry into efi_mem.
 */
static int efi_mem_carve_out(struct efi_mem_list *map,
			     struct efi_mem_desc *carve_desc,
			     bool overlap_only_ram)
{
	struct efi_mem_list *newmap;
	struct efi_mem_desc *map_desc = &map->desc;
	uint64_t map_start = map_desc->physical_start;
	uint64_t map_end = map_start + (map_desc->num_pages << EFI_PAGE_SHIFT);
	uint64_t carve_start = carve_desc->physical_start;
	uint64_t carve_end = carve_start +
			     (carve_desc->num_pages << EFI_PAGE_SHIFT);

	/* check whether we're overlapping */
	if ((carve_end <= map_start) || (carve_start >= map_end))
		return 0;

	/* We're overlapping with non-RAM, warn the caller if desired */
	if (overlap_only_ram && (map_desc->type != EFI_CONVENTIONAL_MEMORY))
		return -1;

	/* Sanitize carve_start and carve_end to lie within our bounds */
	carve_start = max(carve_start, map_start);
	carve_end = min(carve_end, map_end);

	/* Carving at the beginning of our map? Just move it! */
	if (carve_start == map_start) {
		if (map_end == carve_end) {
			/*
			 * Full overlap, just remove map.
			 * NOTE(review): the node is unlinked but never
			 * freed, and the two stores below then write to
			 * the detached node — harmless but dead; confirm
			 * whether the leak is intentional.
			 */
			list_del(&map->link);
		}

		/* Shrink the entry from the front to start at carve_end */
		map_desc->physical_start = carve_end;
		map_desc->num_pages = (map_end - carve_end) >> EFI_PAGE_SHIFT;
		return 1;
	}

	/*
	 * Overlapping maps, just split the list map at carve_start,
	 * it will get moved or removed in the next iteration.
	 *
	 * [ map_desc |__carve_start__| newmap ]
	 */

	/*
	 * Create a new map from [ carve_start ... map_end ].
	 * NOTE(review): calloc() result is not checked — an allocation
	 * failure would dereference NULL on the next line.
	 */
	newmap = calloc(1, sizeof(*newmap));
	newmap->desc = map->desc;
	newmap->desc.physical_start = carve_start;
	newmap->desc.num_pages = (map_end - carve_start) >> EFI_PAGE_SHIFT;
	list_add_tail(&newmap->link, &efi_mem);

	/* Shrink the map to [ map_start ... carve_start ] */
	map_desc->num_pages = (carve_start - map_start) >> EFI_PAGE_SHIFT;

	return 1;
}
| 122 | |
| 123 | uint64_t efi_add_memory_map(uint64_t start, uint64_t pages, int memory_type, |
| 124 | bool overlap_only_ram) |
| 125 | { |
| 126 | struct list_head *lhandle; |
| 127 | struct efi_mem_list *newlist; |
| 128 | bool do_carving; |
| 129 | |
| 130 | if (!pages) |
| 131 | return start; |
| 132 | |
| 133 | newlist = calloc(1, sizeof(*newlist)); |
| 134 | newlist->desc.type = memory_type; |
| 135 | newlist->desc.physical_start = start; |
| 136 | newlist->desc.virtual_start = start; |
| 137 | newlist->desc.num_pages = pages; |
| 138 | |
| 139 | switch (memory_type) { |
| 140 | case EFI_RUNTIME_SERVICES_CODE: |
| 141 | case EFI_RUNTIME_SERVICES_DATA: |
| 142 | newlist->desc.attribute = (1 << EFI_MEMORY_WB_SHIFT) | |
| 143 | (1ULL << EFI_MEMORY_RUNTIME_SHIFT); |
| 144 | break; |
| 145 | case EFI_MMAP_IO: |
| 146 | newlist->desc.attribute = 1ULL << EFI_MEMORY_RUNTIME_SHIFT; |
| 147 | break; |
| 148 | default: |
| 149 | newlist->desc.attribute = 1 << EFI_MEMORY_WB_SHIFT; |
| 150 | break; |
| 151 | } |
| 152 | |
| 153 | /* Add our new map */ |
| 154 | do { |
| 155 | do_carving = false; |
| 156 | list_for_each(lhandle, &efi_mem) { |
| 157 | struct efi_mem_list *lmem; |
| 158 | int r; |
| 159 | |
| 160 | lmem = list_entry(lhandle, struct efi_mem_list, link); |
| 161 | r = efi_mem_carve_out(lmem, &newlist->desc, |
| 162 | overlap_only_ram); |
| 163 | if (r < 0) { |
| 164 | return 0; |
| 165 | } else if (r) { |
| 166 | do_carving = true; |
| 167 | break; |
| 168 | } |
| 169 | } |
| 170 | } while (do_carving); |
| 171 | |
| 172 | /* Add our new map */ |
| 173 | list_add_tail(&newlist->link, &efi_mem); |
| 174 | |
Alexander Graf | de2a13b | 2016-03-30 16:38:29 +0200 | [diff] [blame] | 175 | /* And make sure memory is listed in descending order */ |
| 176 | efi_mem_sort(); |
| 177 | |
Alexander Graf | 8623f92 | 2016-03-04 01:10:04 +0100 | [diff] [blame] | 178 | return start; |
| 179 | } |
| 180 | |
| 181 | static uint64_t efi_find_free_memory(uint64_t len, uint64_t max_addr) |
| 182 | { |
| 183 | struct list_head *lhandle; |
| 184 | |
| 185 | list_for_each(lhandle, &efi_mem) { |
| 186 | struct efi_mem_list *lmem = list_entry(lhandle, |
| 187 | struct efi_mem_list, link); |
| 188 | struct efi_mem_desc *desc = &lmem->desc; |
| 189 | uint64_t desc_len = desc->num_pages << EFI_PAGE_SHIFT; |
| 190 | uint64_t desc_end = desc->physical_start + desc_len; |
| 191 | uint64_t curmax = min(max_addr, desc_end); |
| 192 | uint64_t ret = curmax - len; |
| 193 | |
| 194 | /* We only take memory from free RAM */ |
| 195 | if (desc->type != EFI_CONVENTIONAL_MEMORY) |
| 196 | continue; |
| 197 | |
| 198 | /* Out of bounds for max_addr */ |
| 199 | if ((ret + len) > max_addr) |
| 200 | continue; |
| 201 | |
| 202 | /* Out of bounds for upper map limit */ |
| 203 | if ((ret + len) > desc_end) |
| 204 | continue; |
| 205 | |
| 206 | /* Out of bounds for lower map limit */ |
| 207 | if (ret < desc->physical_start) |
| 208 | continue; |
| 209 | |
| 210 | /* Return the highest address in this map within bounds */ |
| 211 | return ret; |
| 212 | } |
| 213 | |
| 214 | return 0; |
| 215 | } |
| 216 | |
| 217 | efi_status_t efi_allocate_pages(int type, int memory_type, |
| 218 | unsigned long pages, uint64_t *memory) |
| 219 | { |
| 220 | u64 len = pages << EFI_PAGE_SHIFT; |
| 221 | efi_status_t r = EFI_SUCCESS; |
| 222 | uint64_t addr; |
| 223 | |
| 224 | switch (type) { |
| 225 | case 0: |
| 226 | /* Any page */ |
Andreas Färber | d40f71e | 2016-04-13 14:04:38 +0200 | [diff] [blame] | 227 | addr = efi_find_free_memory(len, gd->start_addr_sp); |
Alexander Graf | 8623f92 | 2016-03-04 01:10:04 +0100 | [diff] [blame] | 228 | if (!addr) { |
| 229 | r = EFI_NOT_FOUND; |
| 230 | break; |
| 231 | } |
| 232 | break; |
| 233 | case 1: |
| 234 | /* Max address */ |
| 235 | addr = efi_find_free_memory(len, *memory); |
| 236 | if (!addr) { |
| 237 | r = EFI_NOT_FOUND; |
| 238 | break; |
| 239 | } |
| 240 | break; |
| 241 | case 2: |
| 242 | /* Exact address, reserve it. The addr is already in *memory. */ |
| 243 | addr = *memory; |
| 244 | break; |
| 245 | default: |
| 246 | /* UEFI doesn't specify other allocation types */ |
| 247 | r = EFI_INVALID_PARAMETER; |
| 248 | break; |
| 249 | } |
| 250 | |
| 251 | if (r == EFI_SUCCESS) { |
| 252 | uint64_t ret; |
| 253 | |
| 254 | /* Reserve that map in our memory maps */ |
| 255 | ret = efi_add_memory_map(addr, pages, memory_type, true); |
| 256 | if (ret == addr) { |
| 257 | *memory = addr; |
| 258 | } else { |
| 259 | /* Map would overlap, bail out */ |
| 260 | r = EFI_OUT_OF_RESOURCES; |
| 261 | } |
| 262 | } |
| 263 | |
| 264 | return r; |
| 265 | } |
| 266 | |
| 267 | void *efi_alloc(uint64_t len, int memory_type) |
| 268 | { |
| 269 | uint64_t ret = 0; |
| 270 | uint64_t pages = (len + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT; |
| 271 | efi_status_t r; |
| 272 | |
| 273 | r = efi_allocate_pages(0, memory_type, pages, &ret); |
| 274 | if (r == EFI_SUCCESS) |
| 275 | return (void*)(uintptr_t)ret; |
| 276 | |
| 277 | return NULL; |
| 278 | } |
| 279 | |
/*
 * Stub: deallocation is not implemented, so pages handed out by
 * efi_allocate_pages() are never returned to the memory map.
 * Always reports success to the caller.
 */
efi_status_t efi_free_pages(uint64_t memory, unsigned long pages)
{
	/* We don't free, let's cross our fingers we have plenty RAM */
	return EFI_SUCCESS;
}
| 285 | |
| 286 | efi_status_t efi_get_memory_map(unsigned long *memory_map_size, |
| 287 | struct efi_mem_desc *memory_map, |
| 288 | unsigned long *map_key, |
| 289 | unsigned long *descriptor_size, |
| 290 | uint32_t *descriptor_version) |
| 291 | { |
| 292 | ulong map_size = 0; |
Alexander Graf | 9b59102 | 2016-04-11 23:51:02 +0200 | [diff] [blame] | 293 | int map_entries = 0; |
Alexander Graf | 8623f92 | 2016-03-04 01:10:04 +0100 | [diff] [blame] | 294 | struct list_head *lhandle; |
| 295 | |
| 296 | list_for_each(lhandle, &efi_mem) |
Alexander Graf | 9b59102 | 2016-04-11 23:51:02 +0200 | [diff] [blame] | 297 | map_entries++; |
| 298 | |
| 299 | map_size = map_entries * sizeof(struct efi_mem_desc); |
Alexander Graf | 8623f92 | 2016-03-04 01:10:04 +0100 | [diff] [blame] | 300 | |
| 301 | *memory_map_size = map_size; |
| 302 | |
| 303 | if (descriptor_size) |
| 304 | *descriptor_size = sizeof(struct efi_mem_desc); |
| 305 | |
| 306 | if (*memory_map_size < map_size) |
| 307 | return EFI_BUFFER_TOO_SMALL; |
| 308 | |
| 309 | /* Copy list into array */ |
| 310 | if (memory_map) { |
Alexander Graf | 9b59102 | 2016-04-11 23:51:02 +0200 | [diff] [blame] | 311 | /* Return the list in ascending order */ |
| 312 | memory_map = &memory_map[map_entries - 1]; |
Alexander Graf | 8623f92 | 2016-03-04 01:10:04 +0100 | [diff] [blame] | 313 | list_for_each(lhandle, &efi_mem) { |
| 314 | struct efi_mem_list *lmem; |
| 315 | |
| 316 | lmem = list_entry(lhandle, struct efi_mem_list, link); |
| 317 | *memory_map = lmem->desc; |
Alexander Graf | 9b59102 | 2016-04-11 23:51:02 +0200 | [diff] [blame] | 318 | memory_map--; |
Alexander Graf | 8623f92 | 2016-03-04 01:10:04 +0100 | [diff] [blame] | 319 | } |
| 320 | } |
| 321 | |
| 322 | return EFI_SUCCESS; |
| 323 | } |
| 324 | |
/*
 * Build the initial EFI memory map: register all DRAM banks as free
 * RAM, then reserve the regions U-Boot itself occupies (image + stack,
 * and the runtime-services section), and optionally set up the 32bit
 * bounce buffer.
 *
 * Returns 0 on success, -1 if the bounce buffer allocation failed.
 */
int efi_memory_init(void)
{
	unsigned long runtime_start, runtime_end, runtime_pages;
	unsigned long uboot_start, uboot_pages;
	/* Assumed upper bound on stack usage below gd->start_addr_sp */
	unsigned long uboot_stack_size = 16 * 1024 * 1024;
	int i;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		u64 ram_start = gd->bd->bi_dram[i].start;
		u64 ram_size = gd->bd->bi_dram[i].size;
		/* Round start up to a page boundary, size up to whole pages */
		u64 start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		u64 pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}

	/* Add U-Boot: from below the stack up to the top of RAM */
	uboot_start = (gd->start_addr_sp - uboot_stack_size) & ~EFI_PAGE_MASK;
	uboot_pages = (gd->ram_top - uboot_start) >> EFI_PAGE_SHIFT;
	efi_add_memory_map(uboot_start, uboot_pages, EFI_LOADER_DATA, false);

	/* Add Runtime Services: linker-provided section, page-aligned */
	runtime_start = (ulong)&__efi_runtime_start & ~EFI_PAGE_MASK;
	runtime_end = (ulong)&__efi_runtime_stop;
	runtime_end = (runtime_end + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
	runtime_pages = (runtime_end - runtime_start) >> EFI_PAGE_SHIFT;
	efi_add_memory_map(runtime_start, runtime_pages,
			   EFI_RUNTIME_SERVICES_CODE, false);

#ifdef CONFIG_EFI_LOADER_BOUNCE_BUFFER
	/* Request a 32bit 64MB bounce buffer region */
	uint64_t efi_bounce_buffer_addr = 0xffffffff;

	/* type 1 = max-address allocation: keep it below 4GiB */
	if (efi_allocate_pages(1, EFI_LOADER_DATA,
			       (64 * 1024 * 1024) >> EFI_PAGE_SHIFT,
			       &efi_bounce_buffer_addr) != EFI_SUCCESS)
		return -1;

	efi_bounce_buffer = (void*)(uintptr_t)efi_bounce_buffer_addr;
#endif

	return 0;
}