// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <alist.h>
#include <efi_loader.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>

#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/kernel.h>

DECLARE_GLOBAL_DATA_PTR;

#define LMB_ALLOC_ANYWHERE	0
#define LMB_ALIST_INITIAL_SIZE	4

static struct lmb lmb;

static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.count = 0x%x\n", name, lmb_rgn_lst->count);

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		base = rgn[i].base;
		size = rgn[i].size;
		end = base + size - 1;
		flags = rgn[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
		       name, i, base, end, size, flags);
	}
}

void lmb_dump_all_force(void)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb.free_mem, "memory");
	lmb_dump_region(&lmb.used_mem, "reserved");
}

void lmb_dump_all(void)
{
#ifdef DEBUG
	lmb_dump_all_force();
#endif
}

static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
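
/*
 * Worked example (hypothetical addresses): for a region [0x1000, 0x1fff]
 * (base 0x1000, size 0x1000) and a region [0x2000, 0x2fff]:
 *
 *	lmb_addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000)  -> 0 (disjoint)
 *	lmb_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000) -> 1 (2nd follows 1st)
 *	lmb_addrs_adjacent(0x2000, 0x1000, 0x1000, 0x1000) -> -1 (2nd precedes 1st)
 */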

static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
				unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_overlap(base1, size1, base2, size2);
}

static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = r; i < lmb_rgn_lst->count - 1; i++) {
		rgn[i].base = rgn[i + 1].base;
		rgn[i].size = rgn[i + 1].size;
		rgn[i].flags = rgn[i + 1].flags;
	}
	lmb_rgn_lst->count--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn[r1].size += rgn[r2].size;
	lmb_remove_region(lmb_rgn_lst, r2);
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
				     unsigned long r1, unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	if (base1 + size1 > base2 + size2) {
		printf("This should never happen: region 1 fully contains region 2\n");
		return;
	}
	rgn[r1].size = base2 + size2 - base1;
	lmb_remove_region(lmb_rgn_lst, r2);
}
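
/*
 * Worked example (hypothetical addresses): with r1 = [0x1000, 0x2fff] and
 * r2 = [0x2000, 0x3fff], the merged region becomes base 0x1000 and size
 * 0x3fff - 0x1000 + 1 = 0x3000, after which r2 is dropped from the list.
 */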

void arch_lmb_reserve_generic(ulong sp, ulong end, ulong align)
{
	ulong bank_end;
	int bank;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of U-Boot area using LMB to prevent U-Boot from overwriting
	 * that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", sp);

	/* adjust sp by 'align' bytes to be safe */
	sp -= align;
	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    sp < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			gd->bd->bi_dram[bank].size - 1;
		if (sp > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve(sp, bank_end - sp + 1);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve((phys_addr_t)(uintptr_t)_start, gd->mon_len);

		break;
	}
}
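
/*
 * A minimal sketch of how an architecture might wire up its hook (the
 * get_stack_pointer() helper and the 4 KiB alignment are illustrative,
 * not taken from any particular port):
 *
 *	void arch_lmb_reserve(void)
 *	{
 *		arch_lmb_reserve_generic(get_stack_pointer(), gd->ram_top,
 *					 SZ_4K);
 *	}
 */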

/**
 * efi_lmb_reserve() - add reservations for EFI memory
 *
 * Add reservations for all EFI memory areas that are not
 * EFI_CONVENTIONAL_MEMORY.
 *
 * Return: 0 on success, 1 on failure
 */
static __maybe_unused int efi_lmb_reserve(void)
{
	struct efi_mem_desc *memmap = NULL, *map;
	efi_uintn_t i, map_size = 0;
	efi_status_t ret;

	ret = efi_get_memory_map_alloc(&map_size, &memmap);
	if (ret != EFI_SUCCESS)
		return 1;

	for (i = 0, map = memmap; i < map_size / sizeof(*map); ++map, ++i) {
		if (map->type != EFI_CONVENTIONAL_MEMORY) {
			lmb_reserve_flags(map_to_sysmem((void *)(uintptr_t)
							map->physical_start),
					  map->num_pages * EFI_PAGE_SIZE,
					  map->type == EFI_RESERVED_MEMORY_TYPE
					  ? LMB_NOMAP : LMB_NONE);
		}
	}
	efi_free_pool(memmap);

	return 0;
}

static void lmb_reserve_common(void *fdt_blob)
{
	arch_lmb_reserve();
	board_lmb_reserve();

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(fdt_blob);

	if (CONFIG_IS_ENABLED(EFI_LOADER))
		efi_lmb_reserve();
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct bd_info *bd, void *fdt_blob)
{
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (bd->bi_dram[i].size)
			lmb_add(bd->bi_dram[i].start, bd->bi_dram[i].size);
	}

	lmb_reserve_common(fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve_range(phys_addr_t base, phys_size_t size,
				void *fdt_blob)
{
	lmb_add(base, size);
	lmb_reserve_common(fdt_blob);
}
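
/*
 * Typical usage (illustrative values): a board with a single DRAM bank
 * starting at 0x40000000 and 512 MiB in size could set up the memory map
 * with:
 *
 *	lmb_init_and_reserve_range(0x40000000, SZ_512M, gd->fdt_blob);
 *
 * which adds the bank to the free list and then applies the arch, board,
 * FDT and EFI reservations in one go.
 */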

/**
 * lmb_add_region_flags() - Add an lmb region to the given list
 * @lmb_rgn_lst: LMB list to which region is to be added (free/used)
 * @base: Start address of the region
 * @size: Size of the region to be added
 * @flags: Attributes of the LMB region
 *
 * Add a region of memory to the list. Depending on the attributes of the
 * region to be added, the function might resize an already existing
 * region or coalesce two adjacent regions.
 *
 * Return: 0 if the region was added successfully, -1 on failure
 */
static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long adjacent, i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	if (alist_err(lmb_rgn_lst))
		return -1;

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;
		phys_size_t rgnflags = rgn[i].flags;
		phys_addr_t end = base + size - 1;
		phys_addr_t rgnend = rgnbase + rgnsize - 1;

		if (rgnbase <= base && end <= rgnend) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				return -1; /* region exists with different flags */
		}

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			if (flags != rgnflags)
				break;
			rgn[i].base -= size;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			if (flags != rgnflags)
				break;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			/* regions overlap */
			return -1;
		}
	}

	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
		rgn = lmb_rgn_lst->data;
		if (rgn[i].flags == rgn[i + 1].flags) {
			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
				/* fix overlapping area */
				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			}
		}
	}

	if (coalesced)
		return coalesced;

	if (alist_full(lmb_rgn_lst) &&
	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
		return -1;
	rgn = lmb_rgn_lst->data;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = lmb_rgn_lst->count; i >= 0; i--) {
		if (i && base < rgn[i - 1].base) {
			rgn[i] = rgn[i - 1];
		} else {
			rgn[i].base = base;
			rgn[i].size = size;
			rgn[i].flags = flags;
			break;
		}
	}

	lmb_rgn_lst->count++;

	return 0;
}
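
/*
 * Coalescing example (hypothetical values): if the list holds a free
 * region [0x1000, 0x1fff] with LMB_NONE flags, then adding the adjacent
 * range base = 0x2000, size = 0x1000 with the same flags simply grows the
 * existing entry to [0x1000, 0x2fff] instead of creating a second entry.
 * The same range added with different flags is kept as a separate region.
 */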

static long lmb_add_region(struct alist *lmb_rgn_lst, phys_addr_t base,
			   phys_size_t size)
{
	return lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
}

/* This routine may be called with relocation disabled. */
long lmb_add(phys_addr_t base, phys_size_t size)
{
	struct alist *lmb_rgn_lst = &lmb.free_mem;

	return lmb_add_region(lmb_rgn_lst, base, size);
}

long lmb_free(phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn;
	struct alist *lmb_rgn_lst = &lmb.used_mem;
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */
	rgn = lmb_rgn_lst->data;
	/* Find the region to which (base, size) belongs */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		rgnbegin = rgn[i].base;
		rgnend = rgnbegin + rgn[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == lmb_rgn_lst->count)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(lmb_rgn_lst, i);
		return 0;
	}

	/* Check to see if the region matches at the front */
	if (rgnbegin == base) {
		rgn[i].base = end + 1;
		rgn[i].size -= size;
		return 0;
	}

	/* Check to see if the region matches at the end */
	if (rgnend == end) {
		rgn[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn[i].size = base - rgn[i].base;
	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
				    rgn[i].flags);
}
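
/*
 * Split example (hypothetical values): freeing base = 0x2000, size = 0x1000
 * out of a reservation [0x1000, 0x3fff] shrinks the existing entry to
 * [0x1000, 0x1fff] and re-adds [0x3000, 0x3fff] with the original flags,
 * leaving a hole where the freed range used to be.
 */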

long lmb_reserve_flags(phys_addr_t base, phys_size_t size, enum lmb_flags flags)
{
	struct alist *lmb_rgn_lst = &lmb.used_mem;

	return lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
}

long lmb_reserve(phys_addr_t base, phys_size_t size)
{
	return lmb_reserve_flags(base, size, LMB_NONE);
}
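
/*
 * Usage sketch (hypothetical addresses): to keep a 1 MiB carve-out at
 * 0x80000000 out of any later allocation, and to mark it as not to be
 * mapped, a caller could do:
 *
 *	lmb_reserve_flags(0x80000000, 0x100000, LMB_NOMAP);
 *
 * Plain lmb_reserve() is the same call with LMB_NONE flags.
 */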

static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < lmb_rgn_lst->count) ? i : -1;
}

static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}
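
/*
 * lmb_align_down() assumes a power-of-two alignment, e.g.
 * lmb_align_down(0x12345, 0x1000) == 0x12000.
 */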

static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
				    phys_addr_t max_addr)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	for (i = lmb.free_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb.used_mem, base,
						   size) < 0)
					return 0;
				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}
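
/*
 * Allocation example (hypothetical values): to carve out 64 KiB of
 * page-aligned memory below 4 GiB, taken from the top of the highest
 * suitable free region:
 *
 *	phys_addr_t buf = lmb_alloc_base(SZ_64K, SZ_4K, SZ_4G);
 *	if (!buf)
 *		...handle the allocation failure...
 *
 * A zero return value is the failure indication here, so address zero
 * itself is never handed out.
 */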

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
{
	long rgn;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb_memory[rgn].base,
				      lmb_memory[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve(base, size) >= 0)
				return base;
		}
	}
	return 0;
}
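
/*
 * Fixed-address example (hypothetical values): loading an image to a known
 * address can be guarded with:
 *
 *	if (lmb_alloc_addr(0x42000000, image_size) != 0x42000000)
 *		...the range is outside RAM or already reserved...
 *
 * On success the exact requested base is returned and the range is
 * reserved; 0 signals that the range was unavailable.
 */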

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(phys_addr_t addr)
{
	int i;
	long rgn;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb.used_mem.count; i++) {
			if (addr < lmb_used[i].base) {
				/* first reserved range > requested address */
				return lmb_used[i].base - addr;
			}
			if (lmb_used[i].base +
			    lmb_used[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb_memory[lmb.free_mem.count - 1].base +
		       lmb_memory[lmb.free_mem.count - 1].size - addr;
	}
	return 0;
}
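
/*
 * Example (hypothetical map): with free memory [0x40000000, 0x4fffffff]
 * and a single reservation [0x44000000, 0x44ffffff],
 * lmb_get_free_size(0x42000000) returns 0x2000000 (the gap up to the
 * reservation), while lmb_get_free_size(0x44000000) returns 0.
 */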

int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
	int i;
	struct lmb_region *lmb_used = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		phys_addr_t upper = lmb_used[i].base +
				    lmb_used[i].size - 1;

		if (addr >= lmb_used[i].base && addr <= upper)
			return (lmb_used[i].flags & flags) == flags;
	}
	return 0;
}

__weak void board_lmb_reserve(void)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(void)
{
	/* please define platform specific arch_lmb_reserve() */
}

static int lmb_setup(void)
{
	bool ret;

	ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free memory\n");
		return -ENOMEM;
	}

	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used memory\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * lmb_init() - Initialise the LMB module
 *
 * Initialise the LMB lists needed for keeping the memory map. There are
 * two lists, in the form of alloced list (alist) data structures: one for
 * the available memory, and one for the used memory. Initialise the two
 * lists as part of board init. Add memory to the available memory list
 * and reserve common areas by adding them to the used memory list.
 *
 * Return: 0 on success, -ve on error
 */
int lmb_init(void)
{
	int ret;

	ret = lmb_setup();
	if (ret) {
		log_info("Unable to init LMB\n");
		return ret;
	}

	return 0;
}
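
/*
 * As the comment above notes, board init code is expected to call
 * lmb_init() once, before any lmb_add()/lmb_reserve() traffic, e.g.
 * (illustrative only):
 *
 *	if (lmb_init())
 *		panic("LMB init failed");
 *	lmb_add(gd->ram_base, gd->ram_size);
 */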

#if CONFIG_IS_ENABLED(UNIT_TEST)
struct lmb *lmb_get(void)
{
	return &lmb;
}

int lmb_push(struct lmb *store)
{
	int ret;

	*store = lmb;
	ret = lmb_setup();
	if (ret)
		return ret;

	return 0;
}

void lmb_pop(struct lmb *store)
{
	alist_uninit(&lmb.free_mem);
	alist_uninit(&lmb.used_mem);
	lmb = *store;
}
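
/*
 * Unit tests are expected to bracket their changes to the global map with
 * this pair, roughly (sketch):
 *
 *	struct lmb store;
 *
 *	lmb_push(&store);	// save the live map, start from empty lists
 *	...exercise lmb_add()/lmb_reserve()/lmb_alloc()...
 *	lmb_pop(&store);	// discard test state, restore the saved map
 */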
#endif /* UNIT_TEST */