// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <common.h>
#include <image.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>

#include <asm/global_data.h>
#include <asm/sections.h>

DECLARE_GLOBAL_DATA_PTR;

#define LMB_ALLOC_ANYWHERE	0

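/* Print one region table: entry count, then base, end, size and flags per entry */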
static void lmb_dump_region(struct lmb_region *rgn, char *name)
{
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.cnt = 0x%lx\n", name, rgn->cnt);

	for (i = 0; i < rgn->cnt; i++) {
		base = rgn->region[i].base;
		size = rgn->region[i].size;
		end = base + size - 1;
		flags = rgn->region[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
		       name, i, base, end, size, flags);
	}
}

void lmb_dump_all_force(struct lmb *lmb)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb->memory, "memory");
	lmb_dump_region(&lmb->reserved, "reserved");
}

void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	lmb_dump_all_force(lmb);
#endif
}

static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

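/*
 * Return 1 if region 2 starts right after region 1, -1 if region 1 starts
 * right after region 2, and 0 if the regions are not adjacent.
 */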
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

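/* Remove entry r from the table by shifting all later entries down one slot */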
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
		rgn->region[i].flags = rgn->region[i + 1].flags;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

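/* Reset both region tables and apply the configured per-table entry limits */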
void lmb_init(struct lmb *lmb)
{
#if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
	lmb->memory.max = CONFIG_LMB_MAX_REGIONS;
	lmb->reserved.max = CONFIG_LMB_MAX_REGIONS;
#elif defined(CONFIG_LMB_MEMORY_REGIONS)
	lmb->memory.max = CONFIG_LMB_MEMORY_REGIONS;
	lmb->reserved.max = CONFIG_LMB_RESERVED_REGIONS;
	lmb->memory.region = lmb->memory_regions;
	lmb->reserved.region = lmb->reserved_regions;
#endif
	lmb->memory.cnt = 0;
	lmb->reserved.cnt = 0;
}

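/*
 * Generic helper for arch_lmb_reserve(): reserve everything from just below
 * the current stack pointer up to 'end' within the DRAM bank that holds the
 * stack, plus the U-Boot image itself when relocation was skipped.
 */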
void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
{
	ulong bank_end;
	int bank;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of U-Boot area using LMB to prevent U-Boot from overwriting
	 * that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", sp);

	/* adjust sp by 'align' bytes to be safe */
	sp -= align;
	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    sp < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			gd->bd->bi_dram[bank].size - 1;
		if (sp > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve(lmb, sp, bank_end - sp + 1);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve(lmb, (phys_addr_t)(uintptr_t)_start, gd->mon_len);

		break;
	}
}

static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
{
	arch_lmb_reserve(lmb);
	board_lmb_reserve(lmb);

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob)
{
	int i;

	lmb_init(lmb);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (bd->bi_dram[i].size) {
			lmb_add(lmb, bd->bi_dram[i].start,
				bd->bi_dram[i].size);
		}
	}

	lmb_reserve_common(lmb, fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
				phys_size_t size, void *fdt_blob)
{
	lmb_init(lmb);
	lmb_add(lmb, base, size);
	lmb_reserve_common(lmb, fdt_blob);
}
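
/*
 * Illustrative usage sketch (not compiled; gd->bd / gd->fdt_blob are the
 * usual inputs, and SZ_16M / SZ_4K below merely stand in for a 16 MiB
 * request with 4 KiB alignment - pick values to suit the caller):
 *
 *	struct lmb lmb;
 *	phys_addr_t addr;
 *
 *	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
 *	addr = lmb_alloc(&lmb, SZ_16M, SZ_4K);
 *	if (!addr)
 *		printf("LMB allocation failed\n");
 *	lmb_dump_all(&lmb);
 */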

/* This routine is called with relocation disabled. */
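/*
 * Add [base, base + size) to a region table: merge with an existing entry
 * when the new range is adjacent and carries the same flags, otherwise
 * insert a new entry keeping the table sorted by base address.  Returns a
 * negative value on overlap, flag mismatch or a full table, otherwise zero
 * or the number of coalesced entries.
 */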
static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if (rgn->cnt == 0) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->region[0].flags = flags;
		rgn->cnt = 1;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;
		phys_size_t rgnflags = rgn->region[i].flags;

		if (rgnbase == base && rgnsize == size) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				return -1; /* regions with new flags */
		}

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			if (flags != rgnflags)
				break;
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			if (flags != rgnflags)
				break;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			/* regions overlap */
			return -1;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		if (rgn->region[i].flags == rgn->region[i + 1].flags) {
			lmb_coalesce_regions(rgn, i, i + 1);
			coalesced++;
		}
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= rgn->max)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
			rgn->region[i + 1].flags = rgn->region[i].flags;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			rgn->region[i + 1].flags = flags;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->region[0].flags = flags;
	}

	rgn->cnt++;

	return 0;
}

static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
			   phys_size_t size)
{
	return lmb_add_region_flags(rgn, base, size, LMB_NONE);
}

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

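/*
 * Remove [base, base + size) from the reserved table.  The freed range must
 * lie entirely within one existing entry; that entry is dropped, trimmed at
 * the front or back, or split in two as needed.  Returns a negative value
 * when the range is not covered by a single reserved entry.
 */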
long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end + 1;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region_flags(rgn, end + 1, rgnend - end,
				    rgn->region[i].flags);
}

long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size,
		       enum lmb_flags flags)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region_flags(_rgn, base, size, flags);
}

long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	return lmb_reserve_flags(lmb, base, size, LMB_NONE);
}

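/*
 * Return the index of the first entry in 'rgn' that overlaps
 * [base, base + size), or -1 if there is no overlap.
 */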
static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			   phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

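/*
 * Allocate 'size' bytes below 'max_addr' (or anywhere when max_addr is
 * LMB_ALLOC_ANYWHERE): walk the memory banks from the top down, step the
 * candidate address below any reserved region it collides with, and record
 * the final range in the reserved table.  Returns the base address, or 0 on
 * failure.
 */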
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			     phys_addr_t max_addr)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   size) < 0)
					return 0;
				return base;
			}
			res_base = lmb->reserved.region[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	long rgn;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
				      lmb->memory.region[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve(lmb, base, size) >= 0)
				return base;
		}
	}
	return 0;
}

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
{
	int i;
	long rgn;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb->reserved.cnt; i++) {
			if (addr < lmb->reserved.region[i].base) {
				/* first reserved range > requested address */
				return lmb->reserved.region[i].base - addr;
			}
			if (lmb->reserved.region[i].base +
			    lmb->reserved.region[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb->memory.region[lmb->memory.cnt - 1].base +
		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
	}
	return 0;
}

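/*
 * Return 1 if 'addr' lies in a reserved region whose flags include all bits
 * in 'flags', 0 otherwise.
 */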
int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;
		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return (lmb->reserved.region[i].flags & flags) == flags;
	}
	return 0;
}

int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
	return lmb_is_reserved_flags(lmb, addr, LMB_NONE);
}

__weak void board_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific arch_lmb_reserve() */
}