// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp. June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <alist.h>
#include <efi_loader.h>
#include <event.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
#include <spl.h>

#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

DECLARE_GLOBAL_DATA_PTR;

#define MAP_OP_RESERVE		(u8)0x1
#define MAP_OP_FREE		(u8)0x2
#define MAP_OP_ADD		(u8)0x3

/*
 * The following low level LMB functions must not access the global LMB memory
 * map since they are also used to manage IOVA memory maps in iommu drivers
 * like apple_dart.
 */

static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
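
/*
 * Worked example (illustrative, not called anywhere): for base1 = 0x1000,
 * size1 = 0x1000 and base2 = 0x2000, size2 = 0x1000, the ranges
 * [0x1000-0x1fff] and [0x2000-0x2fff] do not overlap, yet
 * lmb_addrs_adjacent() returns 1 because region 2 starts exactly where
 * region 1 ends; with the arguments swapped it returns -1, and for ranges
 * that neither touch nor overlap both helpers return 0.
 */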

static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
				unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_overlap(base1, size1, base2, size2);
}

static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = r; i < lmb_rgn_lst->count - 1; i++) {
		rgn[i].base = rgn[i + 1].base;
		rgn[i].size = rgn[i + 1].size;
		rgn[i].flags = rgn[i + 1].flags;
	}
	lmb_rgn_lst->count--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn[r1].size += rgn[r2].size;
	lmb_remove_region(lmb_rgn_lst, r2);
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
				     unsigned long r1, unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	if (base1 + size1 > base2 + size2) {
		printf("This should never happen: region 1 ends past region 2\n");
		return;
	}
	rgn[r1].size = base2 + size2 - base1;
	lmb_remove_region(lmb_rgn_lst, r2);
}

static long lmb_resize_regions(struct alist *lmb_rgn_lst,
			       unsigned long idx_start,
			       phys_addr_t base, phys_size_t size)
{
	phys_size_t rgnsize;
	unsigned long rgn_cnt, idx, idx_end;
	phys_addr_t rgnbase, rgnend;
	phys_addr_t mergebase, mergeend;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn_cnt = 0;
	idx = idx_start;
	idx_end = idx_start;

	/*
	 * First thing to do is to identify how many regions
	 * the requested region overlaps.
	 * If the flags match, combine all these overlapping
	 * regions into a single region, and remove the merged
	 * regions.
	 */
	while (idx <= lmb_rgn_lst->count - 1) {
		rgnbase = rgn[idx].base;
		rgnsize = rgn[idx].size;

		if (lmb_addrs_overlap(base, size, rgnbase,
				      rgnsize)) {
			if (rgn[idx].flags != LMB_NONE)
				return -1;
			rgn_cnt++;
			idx_end = idx;
		}
		idx++;
	}

	/* The merged region's base and size */
	rgnbase = rgn[idx_start].base;
	mergebase = min(base, rgnbase);
	rgnend = rgn[idx_end].base + rgn[idx_end].size;
	mergeend = max(rgnend, (base + size));

	rgn[idx_start].base = mergebase;
	rgn[idx_start].size = mergeend - mergebase;

	/* Now remove the merged regions */
	while (--rgn_cnt)
		lmb_remove_region(lmb_rgn_lst, idx_start + 1);

	return 0;
}
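
/*
 * Worked example for lmb_resize_regions() (illustrative, values made up):
 * with two LMB_NONE entries [0x1000-0x1fff] and [0x3000-0x3fff] in the
 * list, a request with base = 0x1800 and size = 0x2000 overlaps both, so
 * the first entry grows to the merged span [0x1000-0x3fff] and the second
 * entry is removed. If any overlapped entry carries flags other than
 * LMB_NONE, the function fails with -1 instead.
 */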

/**
 * lmb_add_region_flags() - Add an lmb region to the given list
 * @lmb_rgn_lst: LMB list to which region is to be added (free/used)
 * @base: Start address of the region
 * @size: Size of the region to be added
 * @flags: Attributes of the LMB region
 *
 * Add a region of memory to the list. If the region does not exist, add
 * it to the list. Depending on the attributes of the region to be added,
 * the function might resize an already existing region or coalesce two
 * adjacent regions.
 *
 * Return: 0 if the region addition was successful, -1 on failure
 */
static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long ret, i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	if (alist_err(lmb_rgn_lst))
		return -1;

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;
		phys_size_t rgnflags = rgn[i].flags;
		phys_addr_t end = base + size - 1;
		phys_addr_t rgnend = rgnbase + rgnsize - 1;

		if (rgnbase <= base && end <= rgnend) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				return -1; /* region exists, but with different flags */
		}

		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (ret > 0) {
			if (flags != rgnflags)
				break;
			rgn[i].base -= size;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (ret < 0) {
			if (flags != rgnflags)
				break;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			if (flags == LMB_NONE) {
				ret = lmb_resize_regions(lmb_rgn_lst, i, base,
							 size);
				if (ret < 0)
					return -1;

				coalesced++;
				break;
			} else {
				return -1;
			}
		}
	}

	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
		rgn = lmb_rgn_lst->data;
		if (rgn[i].flags == rgn[i + 1].flags) {
			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
				/* fix overlapping area */
				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			}
		}
	}

	if (coalesced)
		return 0;

	if (alist_full(lmb_rgn_lst) &&
	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
		return -1;
	rgn = lmb_rgn_lst->data;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = lmb_rgn_lst->count; i >= 0; i--) {
		if (i && base < rgn[i - 1].base) {
			rgn[i] = rgn[i - 1];
		} else {
			rgn[i].base = base;
			rgn[i].size = size;
			rgn[i].flags = flags;
			break;
		}
	}

	lmb_rgn_lst->count++;

	return 0;
}
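
/*
 * Coalescing example (illustrative): if the list already holds
 * [0x1000-0x1fff] and a region with base = 0x2000, size = 0x1000 and the
 * same flags is added, lmb_addrs_adjacent() reports the new region as
 * sitting immediately above, so the existing entry is simply extended to
 * [0x1000-0x2fff]; with differing flags a separate sorted entry is
 * created instead.
 */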

static long _lmb_free(struct alist *lmb_rgn_lst, phys_addr_t base,
		      phys_size_t size)
{
	struct lmb_region *rgn;
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */
	rgn = lmb_rgn_lst->data;
	/* Find the region where (base, size) belongs to */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		rgnbegin = rgn[i].base;
		rgnend = rgnbegin + rgn[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == lmb_rgn_lst->count)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(lmb_rgn_lst, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn[i].base = end + 1;
		rgn[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn[i].size = base - rgn[i].base;
	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
				    rgn[i].flags);
}

static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < lmb_rgn_lst->count) ? i : -1;
}

static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

/*
 * IOVA LMB memory maps using lmb pointers instead of the global LMB memory map.
 */

int io_lmb_setup(struct lmb *io_lmb)
{
	int ret;

	ret = alist_init(&io_lmb->free_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free IOVA\n");
		return -ENOMEM;
	}

	ret = alist_init(&io_lmb->used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used IOVA\n");
		return -ENOMEM;
	}

	io_lmb->test = false;

	return 0;
}

void io_lmb_teardown(struct lmb *io_lmb)
{
	alist_uninit(&io_lmb->free_mem);
	alist_uninit(&io_lmb->used_mem);
}

long io_lmb_add(struct lmb *io_lmb, phys_addr_t base, phys_size_t size)
{
	return lmb_add_region_flags(&io_lmb->free_mem, base, size, LMB_NONE);
}

/* derived and simplified from _lmb_alloc_base() */
phys_addr_t io_lmb_alloc(struct lmb *io_lmb, phys_size_t size, ulong align)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = io_lmb->used_mem.data;
	struct lmb_region *lmb_memory = io_lmb->free_mem.data;

	for (i = io_lmb->free_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;
		base = lmb_align_down(lmbbase + lmbsize - size, align);

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&io_lmb->used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&io_lmb->used_mem, base,
							 size, LMB_NONE) < 0)
					return 0;

				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

long io_lmb_free(struct lmb *io_lmb, phys_addr_t base, phys_size_t size)
{
	return _lmb_free(&io_lmb->used_mem, base, size);
}
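
/*
 * Typical IOVA-map lifecycle in an iommu driver (illustrative sketch; the
 * window address and the sizes are made-up values):
 *
 *	struct lmb iovad;
 *	phys_addr_t iova;
 *
 *	if (io_lmb_setup(&iovad))
 *		return -ENOMEM;
 *	io_lmb_add(&iovad, 0x80000000, SZ_512M);	// usable IOVA window
 *	iova = io_lmb_alloc(&iovad, SZ_16K, SZ_16K);	// 0 means failure
 *	...
 *	io_lmb_free(&iovad, iova, SZ_16K);
 *	io_lmb_teardown(&iovad);
 */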

/*
 * Low level LMB functions are used to manage IOVA memory maps for the Apple
 * dart iommu. They must not access the global LMB memory map. So keep the
 * global LMB variable declaration unreachable from them.
 */

static struct lmb lmb;

static bool lmb_should_notify(enum lmb_flags flags)
{
	return !lmb.test && !(flags & LMB_NONOTIFY) &&
		CONFIG_IS_ENABLED(EFI_LOADER);
}

static int lmb_map_update_notify(phys_addr_t addr, phys_size_t size, u8 op,
				 enum lmb_flags flags)
{
	u64 efi_addr;
	u64 pages;
	efi_status_t status;

	if (op != MAP_OP_RESERVE && op != MAP_OP_FREE && op != MAP_OP_ADD) {
		log_err("Invalid map update op received (%d)\n", op);
		return -1;
	}

	if (!lmb_should_notify(flags))
		return 0;

	efi_addr = (uintptr_t)map_sysmem(addr, 0);
	pages = efi_size_in_pages(size + (efi_addr & EFI_PAGE_MASK));
	efi_addr &= ~EFI_PAGE_MASK;

	status = efi_add_memory_map_pg(efi_addr, pages,
				       op == MAP_OP_RESERVE ?
				       EFI_BOOT_SERVICES_DATA :
				       EFI_CONVENTIONAL_MEMORY,
				       false);
	if (status != EFI_SUCCESS) {
		log_err("%s: LMB Map notify failure %lu\n", __func__,
			status & ~EFI_ERROR_MASK);
		return -1;
	}
	unmap_sysmem((void *)(uintptr_t)efi_addr);

	return 0;
}
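
/*
 * Worked example (illustrative, assumes an identity sysmem mapping): for
 * addr = 0x80001234 and size = 0x2000, efi_addr is rounded down to
 * 0x80001000 and pages = efi_size_in_pages(0x2000 + 0x234) = 3, so the
 * EFI memory map is updated for the whole-page span 0x80001000-0x80003fff.
 */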

static void lmb_print_region_flags(enum lmb_flags flags)
{
	u64 bitpos;
	const char *flag_str[] = { "none", "no-map", "no-overwrite", "no-notify" };

	do {
		bitpos = flags ? fls(flags) - 1 : 0;
		assert_noisy(bitpos < ARRAY_SIZE(flag_str));
		printf("%s", flag_str[bitpos]);
		flags &= ~(1ull << bitpos);
		puts(flags ? ", " : "\n");
	} while (flags);
}

static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.count = 0x%x\n", name, lmb_rgn_lst->count);

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		base = rgn[i].base;
		size = rgn[i].size;
		end = base + size - 1;
		flags = rgn[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: ",
		       name, i, base, end, size);
		lmb_print_region_flags(flags);
	}
}

void lmb_dump_all_force(void)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb.free_mem, "memory");
	lmb_dump_region(&lmb.used_mem, "reserved");
}

void lmb_dump_all(void)
{
#ifdef DEBUG
	lmb_dump_all_force();
#endif
}
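
/*
 * Example lmb_dump_all_force() output (illustrative; addresses and sizes
 * depend on the board):
 *
 *	lmb_dump_all:
 *	 memory.count = 0x1
 *	 memory[0]	[0x40000000-0x7fffffff], 0x40000000 bytes flags: none
 *	 reserved.count = 0x1
 *	 reserved[0]	[0x7dc00000-0x7fffffff], 0x02400000 bytes flags: no-overwrite
 */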

static void lmb_reserve_uboot_region(void)
{
	int bank;
	ulong end, bank_end;
	phys_addr_t rsv_start;

	rsv_start = gd->start_addr_sp - CONFIG_STACK_SIZE;
	end = gd->ram_top;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of RAM area to prevent LMB from overwriting that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", (ulong)rsv_start);

	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    rsv_start < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			gd->bd->bi_dram[bank].size - 1;
		if (rsv_start > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve_flags(rsv_start, bank_end - rsv_start + 1,
				  LMB_NOOVERWRITE);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve_flags((phys_addr_t)(uintptr_t)_start,
					  gd->mon_len, LMB_NOOVERWRITE);

		break;
	}
}

static void lmb_reserve_common(void *fdt_blob)
{
	lmb_reserve_uboot_region();

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(fdt_blob);
}

static __maybe_unused void lmb_reserve_common_spl(void)
{
	phys_addr_t rsv_start;
	phys_size_t rsv_size;

	/*
	 * Assume an SPL stack of 16KB. This must be
	 * more than enough for the SPL stage.
	 */
	if (IS_ENABLED(CONFIG_SPL_STACK_R_ADDR)) {
		rsv_start = gd->start_addr_sp - 16384;
		rsv_size = 16384;
		lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}

	if (IS_ENABLED(CONFIG_SPL_SEPARATE_BSS)) {
		/* Reserve the bss region */
		rsv_start = (phys_addr_t)(uintptr_t)__bss_start;
		rsv_size = (phys_addr_t)(uintptr_t)__bss_end -
			   (phys_addr_t)(uintptr_t)__bss_start;
		lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}
}

/**
 * lmb_add_memory() - Add memory range for LMB allocations
 *
 * Add the entire available memory range to the pool of memory that
 * can be used by the LMB module for allocations.
 *
 * Return: None
 */
void lmb_add_memory(void)
{
	int i;
	phys_size_t size;
	u64 ram_top = gd->ram_top;
	struct bd_info *bd = gd->bd;

	if (CONFIG_IS_ENABLED(LMB_ARCH_MEM_MAP))
		return lmb_arch_add_memory();

	/* Assume a 4GB ram_top if not defined */
	if (!ram_top)
		ram_top = 0x100000000ULL;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		size = bd->bi_dram[i].size;
		if (size) {
			lmb_add(bd->bi_dram[i].start, size);

			/*
			 * Reserve memory above ram_top as
			 * no-overwrite so that it cannot be
			 * allocated
			 */
			if (bd->bi_dram[i].start >= ram_top)
				lmb_reserve_flags(bd->bi_dram[i].start, size,
						  LMB_NOOVERWRITE);
		}
	}
}
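
/*
 * Worked example (illustrative): with ram_top = 0x100000000 and two DRAM
 * banks of 2 GiB each at 0x80000000 and 0x100000000, both banks are added
 * to the free list, but the second bank starts at ram_top, so it is
 * immediately reserved LMB_NOOVERWRITE and allocations stay below 4 GiB.
 */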

static long lmb_add_region(struct alist *lmb_rgn_lst, phys_addr_t base,
			   phys_size_t size)
{
	return lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
}

/* This routine may be called with relocation disabled. */
long lmb_add(phys_addr_t base, phys_size_t size)
{
	long ret;
	struct alist *lmb_rgn_lst = &lmb.free_mem;

	ret = lmb_add_region(lmb_rgn_lst, base, size);
	if (ret)
		return ret;

	return lmb_map_update_notify(base, size, MAP_OP_ADD, LMB_NONE);
}

/**
 * lmb_free_flags() - Free up a region of memory
 * @base: Base Address of region to be freed
 * @size: Size of the region to be freed
 * @flags: Memory region attributes
 *
 * Return: 0 if successful, -1 on failure
 */
long lmb_free_flags(phys_addr_t base, phys_size_t size,
		    uint flags)
{
	long ret;

	ret = _lmb_free(&lmb.used_mem, base, size);
	if (ret < 0)
		return ret;

	return lmb_map_update_notify(base, size, MAP_OP_FREE, flags);
}

long lmb_free(phys_addr_t base, phys_size_t size)
{
	return lmb_free_flags(base, size, LMB_NONE);
}

long lmb_reserve_flags(phys_addr_t base, phys_size_t size, enum lmb_flags flags)
{
	long ret = 0;
	struct alist *lmb_rgn_lst = &lmb.used_mem;

	ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
	if (ret)
		return ret;

	return lmb_map_update_notify(base, size, MAP_OP_RESERVE, flags);
}

long lmb_reserve(phys_addr_t base, phys_size_t size)
{
	return lmb_reserve_flags(base, size, LMB_NONE);
}
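
/*
 * Illustrative board-code usage (address and size made up): carve out a
 * framebuffer region that later LMB allocations must not overwrite:
 *
 *	if (lmb_reserve_flags(0x9f000000, SZ_16M, LMB_NOOVERWRITE) < 0)
 *		log_err("framebuffer region is busy\n");
 */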

static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
				   phys_addr_t max_addr, enum lmb_flags flags)
{
	int ret;
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	for (i = lmb.free_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&lmb.used_mem, base,
							 size, flags))
					return 0;

				ret = lmb_map_update_notify(base, size,
							    MAP_OP_RESERVE,
							    flags);
				if (ret)
					return ret;

				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = _lmb_alloc_base(size, align, max_addr, LMB_NONE);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}
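
/*
 * Illustrative example (values made up): allocate a page-aligned 1 MiB
 * scratch buffer below 4 GiB so a 32-bit DMA master can address it:
 *
 *	phys_addr_t buf = lmb_alloc_base(SZ_1M, SZ_4K, SZ_4G);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */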

/**
 * lmb_alloc_base_flags() - Allocate specified memory region with specified attributes
 * @size: Size of the region requested
 * @align: Alignment of the memory region requested
 * @max_addr: Maximum address of the requested region
 * @flags: Memory region attributes to be set
 *
 * Allocate a region of memory with the attributes specified through the
 * parameter. The max_addr parameter is used to specify the maximum address
 * below which the requested region should be allocated.
 *
 * Return: base address on success, 0 on error
 */
phys_addr_t lmb_alloc_base_flags(phys_size_t size, ulong align,
				 phys_addr_t max_addr, uint flags)
{
	phys_addr_t alloc;

	alloc = _lmb_alloc_base(size, align, max_addr, flags);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

static phys_addr_t _lmb_alloc_addr(phys_addr_t base, phys_size_t size,
				   enum lmb_flags flags)
{
	long rgn;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb_memory[rgn].base,
				      lmb_memory[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve_flags(base, size, flags) >= 0)
				return base;
		}
	}

	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
{
	return _lmb_alloc_addr(base, size, LMB_NONE);
}

/**
 * lmb_alloc_addr_flags() - Allocate specified memory address with specified attributes
 * @base: Base Address requested
 * @size: Size of the region requested
 * @flags: Memory region attributes to be set
 *
 * Allocate a region of memory with the attributes specified through the
 * parameter. The base parameter is used to specify the base address
 * of the requested region.
 *
 * Return: base address on success, 0 on error
 */
phys_addr_t lmb_alloc_addr_flags(phys_addr_t base, phys_size_t size,
				 uint flags)
{
	return _lmb_alloc_addr(base, size, flags);
}
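
/*
 * Illustrative example (address and size made up): pin the load address
 * taken from an image header, failing if it lies outside RAM or is
 * already reserved:
 *
 *	if (lmb_alloc_addr(0x42000000, SZ_8M) != 0x42000000)
 *		return -EBUSY;
 */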

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(phys_addr_t addr)
{
	int i;
	long rgn;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.free_mem.data;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb.free_mem, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb.used_mem.count; i++) {
			if (addr < lmb_used[i].base) {
				/* first reserved range > requested address */
				return lmb_used[i].base - addr;
			}
			if (lmb_used[i].base +
			    lmb_used[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb_memory[lmb.free_mem.count - 1].base +
		       lmb_memory[lmb.free_mem.count - 1].size - addr;
	}
	return 0;
}

int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
	int i;
	struct lmb_region *lmb_used = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		phys_addr_t upper = lmb_used[i].base +
			lmb_used[i].size - 1;

		if (addr >= lmb_used[i].base && addr <= upper)
			return (lmb_used[i].flags & flags) == flags;
	}
	return 0;
}

static int lmb_setup(bool test)
{
	bool ret;

	ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free memory\n");
		return -ENOMEM;
	}

	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used memory\n");
		return -ENOMEM;
	}

	lmb.test = test;

	return 0;
}

/**
 * lmb_init() - Initialise the LMB module
 *
 * Initialise the LMB lists needed for keeping the memory map. There
 * are two lists, both in the form of the alloced list (alist) data
 * structure: one for the available memory, and one for the used memory.
 * Initialise the two lists as part of board init. Add memory to the
 * available memory list and reserve common areas by adding them to the
 * used memory list.
 *
 * Return: 0 on success, -ve on error
 */
int lmb_init(void)
{
	int ret;

	ret = lmb_setup(false);
	if (ret) {
		log_info("Unable to init LMB\n");
		return ret;
	}

	lmb_add_memory();

	/* Reserve the U-Boot image region once U-Boot has relocated */
	if (xpl_phase() == PHASE_SPL)
		lmb_reserve_common_spl();
	else if (xpl_phase() == PHASE_BOARD_R)
		lmb_reserve_common((void *)gd->fdt_blob);

	return 0;
}

struct lmb *lmb_get(void)
{
	return &lmb;
}

#if CONFIG_IS_ENABLED(UNIT_TEST)
int lmb_push(struct lmb *store)
{
	int ret;

	*store = lmb;
	ret = lmb_setup(true);
	if (ret)
		return ret;

	return 0;
}
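
/*
 * Unit tests save and restore the global map around a test run
 * (illustrative sketch):
 *
 *	struct lmb store;
 *
 *	ut_assertok(lmb_push(&store));
 *	// exercise lmb_add()/lmb_reserve()/lmb_alloc() on the scratch map
 *	lmb_pop(&store);
 */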

void lmb_pop(struct lmb *store)
{
	alist_uninit(&lmb.free_mem);
	alist_uninit(&lmb.used_mem);
	lmb = *store;
}
#endif /* UNIT_TEST */