// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <alist.h>
#include <efi_loader.h>
#include <event.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
#include <spl.h>

#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

DECLARE_GLOBAL_DATA_PTR;

#define MAP_OP_RESERVE		(u8)0x1
#define MAP_OP_FREE		(u8)0x2
#define MAP_OP_ADD		(u8)0x3

static struct lmb lmb;

static bool lmb_should_notify(enum lmb_flags flags)
{
	return !lmb.test && !(flags & LMB_NONOTIFY) &&
		CONFIG_IS_ENABLED(EFI_LOADER);
}

static int lmb_map_update_notify(phys_addr_t addr, phys_size_t size, u8 op,
				 enum lmb_flags flags)
{
	u64 efi_addr;
	u64 pages;
	efi_status_t status;

	if (op != MAP_OP_RESERVE && op != MAP_OP_FREE && op != MAP_OP_ADD) {
		log_err("Invalid map update op received (%d)\n", op);
		return -1;
	}

	if (!lmb_should_notify(flags))
		return 0;

	efi_addr = (uintptr_t)map_sysmem(addr, 0);
	pages = efi_size_in_pages(size + (efi_addr & EFI_PAGE_MASK));
	efi_addr &= ~EFI_PAGE_MASK;

	status = efi_add_memory_map_pg(efi_addr, pages,
				       op == MAP_OP_RESERVE ?
				       EFI_BOOT_SERVICES_DATA :
				       EFI_CONVENTIONAL_MEMORY,
				       false);
	if (status != EFI_SUCCESS) {
		log_err("%s: LMB Map notify failure %lu\n", __func__,
			status & ~EFI_ERROR_MASK);
		return -1;
	}
	unmap_sysmem((void *)(uintptr_t)efi_addr);

	return 0;
}

static void lmb_print_region_flags(enum lmb_flags flags)
{
	u64 bitpos;
	const char *flag_str[] = { "none", "no-map", "no-overwrite", "no-notify" };

	do {
		bitpos = flags ? fls(flags) - 1 : 0;
		assert_noisy(bitpos < ARRAY_SIZE(flag_str));
		printf("%s", flag_str[bitpos]);
		flags &= ~(1ull << bitpos);
		puts(flags ? ", " : "\n");
	} while (flags);
}

static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.count = 0x%x\n", name, lmb_rgn_lst->count);

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		base = rgn[i].base;
		size = rgn[i].size;
		end = base + size - 1;
		flags = rgn[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: ",
		       name, i, base, end, size);
		lmb_print_region_flags(flags);
	}
}

void lmb_dump_all_force(void)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb.free_mem, "memory");
	lmb_dump_region(&lmb.used_mem, "reserved");
}

void lmb_dump_all(void)
{
#ifdef DEBUG
	lmb_dump_all_force();
#endif
}

static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
				unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_overlap(base1, size1, base2, size2);
}

static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = r; i < lmb_rgn_lst->count - 1; i++) {
		rgn[i].base = rgn[i + 1].base;
		rgn[i].size = rgn[i + 1].size;
		rgn[i].flags = rgn[i + 1].flags;
	}
	lmb_rgn_lst->count--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn[r1].size += rgn[r2].size;
	lmb_remove_region(lmb_rgn_lst, r2);
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
				     unsigned long r1, unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	if (base1 + size1 > base2 + size2) {
		printf("This should never happen\n");
		return;
	}
	rgn[r1].size = base2 + size2 - base1;
	lmb_remove_region(lmb_rgn_lst, r2);
}

static void lmb_reserve_uboot_region(void)
{
	int bank;
	ulong end, bank_end;
	phys_addr_t rsv_start;

	rsv_start = gd->start_addr_sp - CONFIG_STACK_SIZE;
	end = gd->ram_top;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of RAM area to prevent LMB from overwriting that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", (ulong)rsv_start);

	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    rsv_start < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			gd->bd->bi_dram[bank].size - 1;
		if (rsv_start > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve_flags(rsv_start, bank_end - rsv_start + 1,
				  LMB_NOOVERWRITE);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve_flags((phys_addr_t)(uintptr_t)_start,
					  gd->mon_len, LMB_NOOVERWRITE);

		break;
	}
}

static void lmb_reserve_common(void *fdt_blob)
{
	lmb_reserve_uboot_region();

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(fdt_blob);
}

static __maybe_unused void lmb_reserve_common_spl(void)
{
	phys_addr_t rsv_start;
	phys_size_t rsv_size;

	/*
	 * Assume an SPL stack of 16KB. This must be
	 * more than enough for the SPL stage.
	 */
	if (IS_ENABLED(CONFIG_SPL_STACK_R_ADDR)) {
		rsv_start = gd->start_addr_sp - 16384;
		rsv_size = 16384;
		lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}

	if (IS_ENABLED(CONFIG_SPL_SEPARATE_BSS)) {
		/* Reserve the bss region */
		rsv_start = (phys_addr_t)(uintptr_t)__bss_start;
		rsv_size = (phys_addr_t)(uintptr_t)__bss_end -
			(phys_addr_t)(uintptr_t)__bss_start;
		lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}
}

/**
 * lmb_add_memory() - Add memory range for LMB allocations
 *
 * Add the entire available memory range to the pool of memory that
 * can be used by the LMB module for allocations.
 *
 * Return: None
 */
void lmb_add_memory(void)
{
	int i;
	phys_size_t size;
	u64 ram_top = gd->ram_top;
	struct bd_info *bd = gd->bd;

	if (CONFIG_IS_ENABLED(LMB_ARCH_MEM_MAP))
		return lmb_arch_add_memory();

	/* Assume a 4GB ram_top if not defined */
	if (!ram_top)
		ram_top = 0x100000000ULL;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		size = bd->bi_dram[i].size;
		if (size) {
			lmb_add(bd->bi_dram[i].start, size);

			/*
			 * Reserve memory above ram_top as
			 * no-overwrite so that it cannot be
			 * allocated
			 */
			if (bd->bi_dram[i].start >= ram_top)
				lmb_reserve_flags(bd->bi_dram[i].start, size,
						  LMB_NOOVERWRITE);
		}
	}
}

static long lmb_resize_regions(struct alist *lmb_rgn_lst,
			       unsigned long idx_start,
			       phys_addr_t base, phys_size_t size)
{
	phys_size_t rgnsize;
	unsigned long rgn_cnt, idx, idx_end;
	phys_addr_t rgnbase, rgnend;
	phys_addr_t mergebase, mergeend;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn_cnt = 0;
	idx = idx_start;
	idx_end = idx_start;

	/*
	 * First thing to do is to identify how many regions
	 * the requested region overlaps.
	 * If the flags match, combine all these overlapping
	 * regions into a single region, and remove the merged
	 * regions.
	 */
	while (idx <= lmb_rgn_lst->count - 1) {
		rgnbase = rgn[idx].base;
		rgnsize = rgn[idx].size;

		if (lmb_addrs_overlap(base, size, rgnbase,
				      rgnsize)) {
			if (rgn[idx].flags != LMB_NONE)
				return -1;
			rgn_cnt++;
			idx_end = idx;
		}
		idx++;
	}

	/* The merged region's base and size */
	rgnbase = rgn[idx_start].base;
	mergebase = min(base, rgnbase);
	rgnend = rgn[idx_end].base + rgn[idx_end].size;
	mergeend = max(rgnend, (base + size));

	rgn[idx_start].base = mergebase;
	rgn[idx_start].size = mergeend - mergebase;

	/* Now remove the merged regions */
	while (--rgn_cnt)
		lmb_remove_region(lmb_rgn_lst, idx_start + 1);

	return 0;
}

/**
 * lmb_add_region_flags() - Add an lmb region to the given list
 * @lmb_rgn_lst: LMB list to which region is to be added (free/used)
 * @base: Start address of the region
 * @size: Size of the region to be added
 * @flags: Attributes of the LMB region
 *
 * Add a region of memory to the list. Depending on the attributes of
 * the region to be added, the function might resize an already existing
 * region or coalesce two adjacent regions.
 *
 * Return: 0 if the region addition was successful, -1 on failure
 */
static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long ret, i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	if (alist_err(lmb_rgn_lst))
		return -1;

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;
		phys_size_t rgnflags = rgn[i].flags;
		phys_addr_t end = base + size - 1;
		phys_addr_t rgnend = rgnbase + rgnsize - 1;

		if (rgnbase <= base && end <= rgnend) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				return -1; /* regions with new flags */
		}

		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (ret > 0) {
			if (flags != rgnflags)
				break;
			rgn[i].base -= size;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (ret < 0) {
			if (flags != rgnflags)
				break;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			if (flags == LMB_NONE) {
				ret = lmb_resize_regions(lmb_rgn_lst, i, base,
							 size);
				if (ret < 0)
					return -1;

				coalesced++;
				break;
			} else {
				return -1;
			}
		}
	}

	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
		rgn = lmb_rgn_lst->data;
		if (rgn[i].flags == rgn[i + 1].flags) {
			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
				/* fix overlapping area */
				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			}
		}
	}

	if (coalesced)
		return 0;

	if (alist_full(lmb_rgn_lst) &&
	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
		return -1;
	rgn = lmb_rgn_lst->data;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = lmb_rgn_lst->count; i >= 0; i--) {
		if (i && base < rgn[i - 1].base) {
			rgn[i] = rgn[i - 1];
		} else {
			rgn[i].base = base;
			rgn[i].size = size;
			rgn[i].flags = flags;
			break;
		}
	}

	lmb_rgn_lst->count++;

	return 0;
}

static long lmb_add_region(struct alist *lmb_rgn_lst, phys_addr_t base,
			   phys_size_t size)
{
	return lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
}

/* This routine may be called with relocation disabled. */
long lmb_add(phys_addr_t base, phys_size_t size)
{
	long ret;
	struct alist *lmb_rgn_lst = &lmb.free_mem;

	ret = lmb_add_region(lmb_rgn_lst, base, size);
	if (ret)
		return ret;

	return lmb_map_update_notify(base, size, MAP_OP_ADD, LMB_NONE);
}

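/*
 * Usage sketch (illustrative only; the bank base address and size are
 * hypothetical, not taken from any real board):
 *
 *	if (lmb_add(0x80000000, SZ_512M) < 0)
 *		printf("Unable to add DRAM bank to LMB\n");
 *
 * In the normal boot flow this is done for every bi_dram[] bank by
 * lmb_add_memory() above.
 */
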
static long _lmb_free(struct alist *lmb_rgn_lst, phys_addr_t base,
		      phys_size_t size)
{
	struct lmb_region *rgn;
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */
	rgn = lmb_rgn_lst->data;
	/* Find the region where (base, size) belongs to */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		rgnbegin = rgn[i].base;
		rgnend = rgnbegin + rgn[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == lmb_rgn_lst->count)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(lmb_rgn_lst, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn[i].base = end + 1;
		rgn[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after hole.
	 */
	rgn[i].size = base - rgn[i].base;
	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
				    rgn[i].flags);
}

/**
 * lmb_free_flags() - Free up a region of memory
 * @base: Base Address of region to be freed
 * @size: Size of the region to be freed
 * @flags: Memory region attributes
 *
 * Free up a region of memory.
 *
 * Return: 0 if successful, -1 on failure
 */
long lmb_free_flags(phys_addr_t base, phys_size_t size,
		    uint flags)
{
	long ret;

	ret = _lmb_free(&lmb.used_mem, base, size);
	if (ret < 0)
		return ret;

	return lmb_map_update_notify(base, size, MAP_OP_FREE, flags);
}

long lmb_free(phys_addr_t base, phys_size_t size)
{
	return lmb_free_flags(base, size, LMB_NONE);
}

long lmb_reserve_flags(phys_addr_t base, phys_size_t size, enum lmb_flags flags)
{
	long ret = 0;
	struct alist *lmb_rgn_lst = &lmb.used_mem;

	ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
	if (ret)
		return ret;

	return lmb_map_update_notify(base, size, MAP_OP_RESERVE, flags);
}

long lmb_reserve(phys_addr_t base, phys_size_t size)
{
	return lmb_reserve_flags(base, size, LMB_NONE);
}

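/*
 * Usage sketch (illustrative only; the address is hypothetical): carve
 * out a scratch region that the allocator must not hand out, then
 * release it again when done:
 *
 *	if (lmb_reserve(0x84000000, SZ_1M) < 0)
 *		printf("Reservation failed\n");
 *	...
 *	lmb_free(0x84000000, SZ_1M);
 */
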
static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < lmb_rgn_lst->count) ? i : -1;
}

static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
				   phys_addr_t max_addr, enum lmb_flags flags)
{
	int ret;
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	for (i = lmb.free_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&lmb.used_mem, base,
							 size, flags))
					return 0;

				ret = lmb_map_update_notify(base, size,
							    MAP_OP_RESERVE,
							    flags);
				if (ret)
					return ret;

				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = _lmb_alloc_base(size, align, max_addr, LMB_NONE);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

/**
 * lmb_alloc_base_flags() - Allocate specified memory region with specified attributes
 * @size: Size of the region requested
 * @align: Alignment of the memory region requested
 * @max_addr: Maximum address of the requested region
 * @flags: Memory region attributes to be set
 *
 * Allocate a region of memory with the attributes specified through the
 * parameter. The max_addr parameter is used to specify the maximum address
 * below which the requested region should be allocated.
 *
 * Return: base address on success, 0 on error
 */
phys_addr_t lmb_alloc_base_flags(phys_size_t size, ulong align,
				 phys_addr_t max_addr, uint flags)
{
	phys_addr_t alloc;

	alloc = _lmb_alloc_base(size, align, max_addr, flags);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

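/*
 * Usage sketch (illustrative only; the 2GiB limit is hypothetical): grab
 * a naturally aligned 4MiB buffer that later requests may not resize or
 * overwrite:
 *
 *	phys_addr_t buf;
 *
 *	buf = lmb_alloc_base_flags(SZ_4M, SZ_4M, 0x80000000, LMB_NOOVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 */
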
static phys_addr_t _lmb_alloc_addr(phys_addr_t base, phys_size_t size,
				   enum lmb_flags flags)
{
	long rgn;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb_memory[rgn].base,
				      lmb_memory[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve_flags(base, size, flags) >= 0)
				return base;
		}
	}

	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
{
	return _lmb_alloc_addr(base, size, LMB_NONE);
}

/**
 * lmb_alloc_addr_flags() - Allocate specified memory address with specified attributes
 * @base: Base Address requested
 * @size: Size of the region requested
 * @flags: Memory region attributes to be set
 *
 * Allocate a region of memory with the attributes specified through the
 * parameter. The base parameter is used to specify the base address
 * of the requested region.
 *
 * Return: base address on success, 0 on error
 */
phys_addr_t lmb_alloc_addr_flags(phys_addr_t base, phys_size_t size,
				 uint flags)
{
	return _lmb_alloc_addr(base, size, flags);
}

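/*
 * Usage sketch (illustrative only; the load address and 'image_size' are
 * hypothetical): pin down the exact range an image is to be loaded to,
 * failing if it lies outside usable RAM or is already reserved:
 *
 *	if (!lmb_alloc_addr(0x82000000, image_size))
 *		printf("Load address is not available\n");
 */
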
/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(phys_addr_t addr)
{
	int i;
	long rgn;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb.used_mem.count; i++) {
			if (addr < lmb_used[i].base) {
				/* first reserved range > requested address */
				return lmb_used[i].base - addr;
			}
			if (lmb_used[i].base + lmb_used[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb_memory[lmb.free_mem.count - 1].base +
		       lmb_memory[lmb.free_mem.count - 1].size - addr;
	}
	return 0;
}

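/*
 * Usage sketch (illustrative only; 'addr' and 'fdt' are hypothetical):
 * check whether a device tree would still fit at a candidate address:
 *
 *	if (lmb_get_free_size(addr) < fdt_totalsize(fdt))
 *		printf("Not enough free space at the candidate address\n");
 */
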
int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
	int i;
	struct lmb_region *lmb_used = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		phys_addr_t upper = lmb_used[i].base + lmb_used[i].size - 1;

		if (addr >= lmb_used[i].base && addr <= upper)
			return (lmb_used[i].flags & flags) == flags;
	}
	return 0;
}

static int lmb_setup(bool test)
{
	bool ret;

	ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free memory\n");
		return -ENOMEM;
	}

	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used memory\n");
		return -ENOMEM;
	}

	lmb.test = test;

	return 0;
}

/**
 * lmb_init() - Initialise the LMB module
 *
 * Initialise the LMB lists needed for keeping the memory map. There
 * are two lists, in the form of the alloced list data structure. One
 * for the available memory, and one for the used memory. Initialise
 * the two lists as part of board init. Add memory to the available
 * memory list and reserve common areas by adding them to the used
 * memory list.
 *
 * Return: 0 on success, -ve on error
 */
int lmb_init(void)
{
	int ret;

	ret = lmb_setup(false);
	if (ret) {
		log_info("Unable to init LMB\n");
		return ret;
	}

	lmb_add_memory();

	/* Reserve the U-Boot image region once U-Boot has relocated */
	if (xpl_phase() == PHASE_SPL)
		lmb_reserve_common_spl();
	else if (xpl_phase() == PHASE_BOARD_R)
		lmb_reserve_common((void *)gd->fdt_blob);

	return 0;
}

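/*
 * Call-site sketch (illustrative): lmb_init() is run once as part of
 * board init; afterwards the rest of U-Boot simply uses the allocation
 * API against the static 'lmb' instance above, e.g.:
 *
 *	lmb_init();
 *	addr = lmb_alloc(SZ_1M, SZ_4K);
 */
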
struct lmb *lmb_get(void)
{
	return &lmb;
}

#if CONFIG_IS_ENABLED(UNIT_TEST)
int lmb_push(struct lmb *store)
{
	int ret;

	*store = lmb;
	ret = lmb_setup(true);
	if (ret)
		return ret;

	return 0;
}

void lmb_pop(struct lmb *store)
{
	alist_uninit(&lmb.free_mem);
	alist_uninit(&lmb.used_mem);
	lmb = *store;
}
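
/*
 * Test usage sketch (illustrative): a unit test saves the global state,
 * runs against a fresh, empty memory map, and restores the original map
 * on exit ('ram' and 'ram_size' are test-provided values):
 *
 *	struct lmb store;
 *
 *	ut_assertok(lmb_push(&store));
 *	ut_assertok(lmb_add(ram, ram_size));
 *	...
 *	lmb_pop(&store);
 */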
#endif /* UNIT_TEST */