blob: 3c66138f7329b272f59603890f1d94762684affa [file] [log] [blame]
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +01001// SPDX-License-Identifier: GPL-2.0+
2/*
3 * (C) Copyright 2018 Simon Goldschmidt
4 */
5
Simon Glass75c4d412020-07-19 10:15:37 -06006#include <dm.h>
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +01007#include <lmb.h>
Simon Glass0f2af882020-05-10 11:40:05 -06008#include <log.h>
Simon Glass9bc15642020-02-03 07:36:16 -07009#include <malloc.h>
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +010010#include <dm/test.h>
Simon Glassb4c722a2023-10-01 19:15:21 -060011#include <test/lib.h>
Simon Glass75c4d412020-07-19 10:15:37 -060012#include <test/test.h>
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +010013#include <test/ut.h>
14
Heinrich Schuchardta88181e2021-11-14 08:41:07 +010015static inline bool lmb_is_nomap(struct lmb_property *m)
16{
17 return m->flags & LMB_NOMAP;
18}
19
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +010020static int check_lmb(struct unit_test_state *uts, struct lmb *lmb,
21 phys_addr_t ram_base, phys_size_t ram_size,
22 unsigned long num_reserved,
23 phys_addr_t base1, phys_size_t size1,
24 phys_addr_t base2, phys_size_t size2,
25 phys_addr_t base3, phys_size_t size3)
26{
Simon Goldschmidtc722dac2019-02-01 21:23:59 +010027 if (ram_size) {
28 ut_asserteq(lmb->memory.cnt, 1);
29 ut_asserteq(lmb->memory.region[0].base, ram_base);
30 ut_asserteq(lmb->memory.region[0].size, ram_size);
31 }
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +010032
33 ut_asserteq(lmb->reserved.cnt, num_reserved);
34 if (num_reserved > 0) {
35 ut_asserteq(lmb->reserved.region[0].base, base1);
36 ut_asserteq(lmb->reserved.region[0].size, size1);
37 }
38 if (num_reserved > 1) {
39 ut_asserteq(lmb->reserved.region[1].base, base2);
40 ut_asserteq(lmb->reserved.region[1].size, size2);
41 }
42 if (num_reserved > 2) {
43 ut_asserteq(lmb->reserved.region[2].base, base3);
44 ut_asserteq(lmb->reserved.region[2].size, size3);
45 }
46 return 0;
47}
48
/*
 * Wrapper around check_lmb() that asserts on failure. NOTE: relies on a
 * variable named 'uts' (struct unit_test_state *) being in scope at the
 * call site, as all the test functions in this file have.
 */
#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \
		   base2, size2, base3, size3) \
	ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \
		  num_reserved, base1, size1, base2, size2, base3, \
		  size3))
54
55/*
56 * Test helper function that reserves 64 KiB somewhere in the simulated RAM and
57 * then does some alloc + free tests.
58 */
Simon Goldschmidtc722dac2019-02-01 21:23:59 +010059static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
60 const phys_size_t ram_size, const phys_addr_t ram0,
61 const phys_size_t ram0_size,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +010062 const phys_addr_t alloc_64k_addr)
63{
64 const phys_addr_t ram_end = ram + ram_size;
65 const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;
66
67 struct lmb lmb;
68 long ret;
69 phys_addr_t a, a2, b, b2, c, d;
70
71 /* check for overflow */
72 ut_assert(ram_end == 0 || ram_end > ram);
73 ut_assert(alloc_64k_end > alloc_64k_addr);
74 /* check input addresses + size */
75 ut_assert(alloc_64k_addr >= ram + 8);
76 ut_assert(alloc_64k_end <= ram_end - 8);
77
78 lmb_init(&lmb);
79
Simon Goldschmidtc722dac2019-02-01 21:23:59 +010080 if (ram0_size) {
81 ret = lmb_add(&lmb, ram0, ram0_size);
82 ut_asserteq(ret, 0);
83 }
84
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +010085 ret = lmb_add(&lmb, ram, ram_size);
86 ut_asserteq(ret, 0);
87
Simon Goldschmidtc722dac2019-02-01 21:23:59 +010088 if (ram0_size) {
89 ut_asserteq(lmb.memory.cnt, 2);
90 ut_asserteq(lmb.memory.region[0].base, ram0);
91 ut_asserteq(lmb.memory.region[0].size, ram0_size);
92 ut_asserteq(lmb.memory.region[1].base, ram);
93 ut_asserteq(lmb.memory.region[1].size, ram_size);
94 } else {
95 ut_asserteq(lmb.memory.cnt, 1);
96 ut_asserteq(lmb.memory.region[0].base, ram);
97 ut_asserteq(lmb.memory.region[0].size, ram_size);
98 }
99
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100100 /* reserve 64KiB somewhere */
101 ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
102 ut_asserteq(ret, 0);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100103 ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100104 0, 0, 0, 0);
105
106 /* allocate somewhere, should be at the end of RAM */
107 a = lmb_alloc(&lmb, 4, 1);
108 ut_asserteq(a, ram_end - 4);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100109 ASSERT_LMB(&lmb, 0, 0, 2, alloc_64k_addr, 0x10000,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100110 ram_end - 4, 4, 0, 0);
111 /* alloc below end of reserved region -> below reserved region */
112 b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
113 ut_asserteq(b, alloc_64k_addr - 4);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100114 ASSERT_LMB(&lmb, 0, 0, 2,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100115 alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);
116
117 /* 2nd time */
118 c = lmb_alloc(&lmb, 4, 1);
119 ut_asserteq(c, ram_end - 8);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100120 ASSERT_LMB(&lmb, 0, 0, 2,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100121 alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
122 d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
123 ut_asserteq(d, alloc_64k_addr - 8);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100124 ASSERT_LMB(&lmb, 0, 0, 2,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100125 alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
126
127 ret = lmb_free(&lmb, a, 4);
128 ut_asserteq(ret, 0);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100129 ASSERT_LMB(&lmb, 0, 0, 2,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100130 alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
131 /* allocate again to ensure we get the same address */
132 a2 = lmb_alloc(&lmb, 4, 1);
133 ut_asserteq(a, a2);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100134 ASSERT_LMB(&lmb, 0, 0, 2,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100135 alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
136 ret = lmb_free(&lmb, a2, 4);
137 ut_asserteq(ret, 0);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100138 ASSERT_LMB(&lmb, 0, 0, 2,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100139 alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
140
141 ret = lmb_free(&lmb, b, 4);
142 ut_asserteq(ret, 0);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100143 ASSERT_LMB(&lmb, 0, 0, 3,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100144 alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
145 ram_end - 8, 4);
146 /* allocate again to ensure we get the same address */
147 b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
148 ut_asserteq(b, b2);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100149 ASSERT_LMB(&lmb, 0, 0, 2,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100150 alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
151 ret = lmb_free(&lmb, b2, 4);
152 ut_asserteq(ret, 0);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100153 ASSERT_LMB(&lmb, 0, 0, 3,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100154 alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
155 ram_end - 8, 4);
156
157 ret = lmb_free(&lmb, c, 4);
158 ut_asserteq(ret, 0);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100159 ASSERT_LMB(&lmb, 0, 0, 2,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100160 alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
161 ret = lmb_free(&lmb, d, 4);
162 ut_asserteq(ret, 0);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100163 ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100164 0, 0, 0, 0);
165
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100166 if (ram0_size) {
167 ut_asserteq(lmb.memory.cnt, 2);
168 ut_asserteq(lmb.memory.region[0].base, ram0);
169 ut_asserteq(lmb.memory.region[0].size, ram0_size);
170 ut_asserteq(lmb.memory.region[1].base, ram);
171 ut_asserteq(lmb.memory.region[1].size, ram_size);
172 } else {
173 ut_asserteq(lmb.memory.cnt, 1);
174 ut_asserteq(lmb.memory.region[0].base, ram);
175 ut_asserteq(lmb.memory.region[0].size, ram_size);
176 }
177
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100178 return 0;
179}
180
181static int test_multi_alloc_512mb(struct unit_test_state *uts,
182 const phys_addr_t ram)
183{
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100184 return test_multi_alloc(uts, ram, 0x20000000, 0, 0, ram + 0x10000000);
185}
186
187static int test_multi_alloc_512mb_x2(struct unit_test_state *uts,
188 const phys_addr_t ram,
189 const phys_addr_t ram0)
190{
191 return test_multi_alloc(uts, ram, 0x20000000, ram0, 0x20000000,
192 ram + 0x10000000);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100193}
194
195/* Create a memory region with one reserved region and allocate */
196static int lib_test_lmb_simple(struct unit_test_state *uts)
197{
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100198 int ret;
199
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100200 /* simulate 512 MiB RAM beginning at 1GiB */
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100201 ret = test_multi_alloc_512mb(uts, 0x40000000);
202 if (ret)
203 return ret;
204
205 /* simulate 512 MiB RAM beginning at 1.5GiB */
206 return test_multi_alloc_512mb(uts, 0xE0000000);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100207}
Simon Glassb4c722a2023-10-01 19:15:21 -0600208LIB_TEST(lib_test_lmb_simple, 0);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100209
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100210/* Create two memory regions with one reserved region and allocate */
211static int lib_test_lmb_simple_x2(struct unit_test_state *uts)
212{
213 int ret;
214
215 /* simulate 512 MiB RAM beginning at 2GiB and 1 GiB */
216 ret = test_multi_alloc_512mb_x2(uts, 0x80000000, 0x40000000);
217 if (ret)
218 return ret;
219
220 /* simulate 512 MiB RAM beginning at 3.5GiB and 1 GiB */
221 return test_multi_alloc_512mb_x2(uts, 0xE0000000, 0x40000000);
222}
Simon Glassb4c722a2023-10-01 19:15:21 -0600223LIB_TEST(lib_test_lmb_simple_x2, 0);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100224
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100225/* Simulate 512 MiB RAM, allocate some blocks that fit/don't fit */
226static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
227{
228 const phys_size_t ram_size = 0x20000000;
229 const phys_size_t big_block_size = 0x10000000;
230 const phys_addr_t ram_end = ram + ram_size;
231 const phys_addr_t alloc_64k_addr = ram + 0x10000000;
232 struct lmb lmb;
233 long ret;
234 phys_addr_t a, b;
235
236 /* check for overflow */
237 ut_assert(ram_end == 0 || ram_end > ram);
238
239 lmb_init(&lmb);
240
241 ret = lmb_add(&lmb, ram, ram_size);
242 ut_asserteq(ret, 0);
243
244 /* reserve 64KiB in the middle of RAM */
245 ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
246 ut_asserteq(ret, 0);
247 ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
248 0, 0, 0, 0);
249
250 /* allocate a big block, should be below reserved */
251 a = lmb_alloc(&lmb, big_block_size, 1);
252 ut_asserteq(a, ram);
253 ASSERT_LMB(&lmb, ram, ram_size, 1, a,
254 big_block_size + 0x10000, 0, 0, 0, 0);
255 /* allocate 2nd big block */
256 /* This should fail, printing an error */
257 b = lmb_alloc(&lmb, big_block_size, 1);
258 ut_asserteq(b, 0);
259 ASSERT_LMB(&lmb, ram, ram_size, 1, a,
260 big_block_size + 0x10000, 0, 0, 0, 0);
261
262 ret = lmb_free(&lmb, a, big_block_size);
263 ut_asserteq(ret, 0);
264 ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
265 0, 0, 0, 0);
266
267 /* allocate too big block */
268 /* This should fail, printing an error */
269 a = lmb_alloc(&lmb, ram_size, 1);
270 ut_asserteq(a, 0);
271 ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
272 0, 0, 0, 0);
273
274 return 0;
275}
276
277static int lib_test_lmb_big(struct unit_test_state *uts)
278{
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100279 int ret;
280
281 /* simulate 512 MiB RAM beginning at 1GiB */
282 ret = test_bigblock(uts, 0x40000000);
283 if (ret)
284 return ret;
285
286 /* simulate 512 MiB RAM beginning at 1.5GiB */
287 return test_bigblock(uts, 0xE0000000);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100288}
Simon Glassb4c722a2023-10-01 19:15:21 -0600289LIB_TEST(lib_test_lmb_big, 0);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100290
291/* Simulate 512 MiB RAM, allocate a block without previous reservation */
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100292static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
293 const phys_addr_t alloc_size, const ulong align)
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100294{
295 const phys_size_t ram_size = 0x20000000;
296 const phys_addr_t ram_end = ram + ram_size;
297 struct lmb lmb;
298 long ret;
299 phys_addr_t a, b;
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100300 const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
301 ~(align - 1);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100302
303 /* check for overflow */
304 ut_assert(ram_end == 0 || ram_end > ram);
305
306 lmb_init(&lmb);
307
308 ret = lmb_add(&lmb, ram, ram_size);
309 ut_asserteq(ret, 0);
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100310 ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100311
312 /* allocate a block */
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100313 a = lmb_alloc(&lmb, alloc_size, align);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100314 ut_assert(a != 0);
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100315 ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
316 alloc_size, 0, 0, 0, 0);
317 /* allocate another block */
318 b = lmb_alloc(&lmb, alloc_size, align);
319 ut_assert(b != 0);
320 if (alloc_size == alloc_size_aligned) {
321 ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
322 (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
323 0);
324 } else {
325 ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
326 (alloc_size_aligned * 2), alloc_size, ram + ram_size
327 - alloc_size_aligned, alloc_size, 0, 0);
328 }
329 /* and free them */
330 ret = lmb_free(&lmb, b, alloc_size);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100331 ut_asserteq(ret, 0);
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100332 ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
333 alloc_size, 0, 0, 0, 0);
334 ret = lmb_free(&lmb, a, alloc_size);
335 ut_asserteq(ret, 0);
336 ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100337
338 /* allocate a block with base*/
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100339 b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100340 ut_assert(a == b);
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100341 ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
342 alloc_size, 0, 0, 0, 0);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100343 /* and free it */
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100344 ret = lmb_free(&lmb, b, alloc_size);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100345 ut_asserteq(ret, 0);
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100346 ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100347
348 return 0;
349}
350
351static int lib_test_lmb_noreserved(struct unit_test_state *uts)
352{
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100353 int ret;
354
355 /* simulate 512 MiB RAM beginning at 1GiB */
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100356 ret = test_noreserved(uts, 0x40000000, 4, 1);
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100357 if (ret)
358 return ret;
359
360 /* simulate 512 MiB RAM beginning at 1.5GiB */
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100361 return test_noreserved(uts, 0xE0000000, 4, 1);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100362}
Simon Glassb4c722a2023-10-01 19:15:21 -0600363LIB_TEST(lib_test_lmb_noreserved, 0);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100364
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100365static int lib_test_lmb_unaligned_size(struct unit_test_state *uts)
366{
367 int ret;
368
369 /* simulate 512 MiB RAM beginning at 1GiB */
370 ret = test_noreserved(uts, 0x40000000, 5, 8);
371 if (ret)
372 return ret;
373
374 /* simulate 512 MiB RAM beginning at 1.5GiB */
375 return test_noreserved(uts, 0xE0000000, 5, 8);
376}
Simon Glassb4c722a2023-10-01 19:15:21 -0600377LIB_TEST(lib_test_lmb_unaligned_size, 0);
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100378
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100379/*
380 * Simulate a RAM that starts at 0 and allocate down to address 0, which must
381 * fail as '0' means failure for the lmb_alloc functions.
382 */
383static int lib_test_lmb_at_0(struct unit_test_state *uts)
384{
385 const phys_addr_t ram = 0;
386 const phys_size_t ram_size = 0x20000000;
387 struct lmb lmb;
388 long ret;
389 phys_addr_t a, b;
390
391 lmb_init(&lmb);
392
393 ret = lmb_add(&lmb, ram, ram_size);
394 ut_asserteq(ret, 0);
395
396 /* allocate nearly everything */
397 a = lmb_alloc(&lmb, ram_size - 4, 1);
398 ut_asserteq(a, ram + 4);
399 ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
400 0, 0, 0, 0);
401 /* allocate the rest */
402 /* This should fail as the allocated address would be 0 */
403 b = lmb_alloc(&lmb, 4, 1);
404 ut_asserteq(b, 0);
405 /* check that this was an error by checking lmb */
406 ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
407 0, 0, 0, 0);
408 /* check that this was an error by freeing b */
409 ret = lmb_free(&lmb, b, 4);
410 ut_asserteq(ret, -1);
411 ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
412 0, 0, 0, 0);
413
414 ret = lmb_free(&lmb, a, ram_size - 4);
415 ut_asserteq(ret, 0);
416 ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
417
418 return 0;
419}
Simon Glassb4c722a2023-10-01 19:15:21 -0600420LIB_TEST(lib_test_lmb_at_0, 0);
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100421
422/* Check that calling lmb_reserve with overlapping regions fails. */
423static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
424{
425 const phys_addr_t ram = 0x40000000;
426 const phys_size_t ram_size = 0x20000000;
427 struct lmb lmb;
428 long ret;
429
430 lmb_init(&lmb);
431
432 ret = lmb_add(&lmb, ram, ram_size);
433 ut_asserteq(ret, 0);
434
435 ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
436 ut_asserteq(ret, 0);
437 ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
438 0, 0, 0, 0);
439 /* allocate overlapping region should fail */
440 ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
441 ut_asserteq(ret, -1);
442 ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
443 0, 0, 0, 0);
444 /* allocate 3nd region */
445 ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
446 ut_asserteq(ret, 0);
447 ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
448 0x40030000, 0x10000, 0, 0);
Udit Kumar27575252023-09-26 16:54:43 +0530449 /* allocate 2nd region , This should coalesced all region into one */
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100450 ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
451 ut_assert(ret >= 0);
452 ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
453 0, 0, 0, 0);
454
Udit Kumar27575252023-09-26 16:54:43 +0530455 /* allocate 2nd region, which should be added as first region */
456 ret = lmb_reserve(&lmb, 0x40000000, 0x8000);
457 ut_assert(ret >= 0);
458 ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x8000,
459 0x40010000, 0x30000, 0, 0);
460
461 /* allocate 3rd region, coalesce with first and overlap with second */
462 ret = lmb_reserve(&lmb, 0x40008000, 0x10000);
463 ut_assert(ret >= 0);
464 ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x40000,
465 0, 0, 0, 0);
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100466 return 0;
467}
Simon Glassb4c722a2023-10-01 19:15:21 -0600468LIB_TEST(lib_test_lmb_overlapping_reserve, 0);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100469
470/*
471 * Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between.
472 * Expect addresses outside the memory range to fail.
473 */
474static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
475{
476 const phys_size_t ram_size = 0x20000000;
477 const phys_addr_t ram_end = ram + ram_size;
478 const phys_size_t alloc_addr_a = ram + 0x8000000;
479 const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
480 const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
481 struct lmb lmb;
482 long ret;
483 phys_addr_t a, b, c, d, e;
484
485 /* check for overflow */
486 ut_assert(ram_end == 0 || ram_end > ram);
487
488 lmb_init(&lmb);
489
490 ret = lmb_add(&lmb, ram, ram_size);
491 ut_asserteq(ret, 0);
492
493 /* reserve 3 blocks */
494 ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
495 ut_asserteq(ret, 0);
496 ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
497 ut_asserteq(ret, 0);
498 ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
499 ut_asserteq(ret, 0);
500 ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
501 alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
502
503 /* allocate blocks */
504 a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram);
505 ut_asserteq(a, ram);
506 ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000,
507 alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
508 b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000,
509 alloc_addr_b - alloc_addr_a - 0x10000);
510 ut_asserteq(b, alloc_addr_a + 0x10000);
511 ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000,
512 alloc_addr_c, 0x10000, 0, 0);
513 c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000,
514 alloc_addr_c - alloc_addr_b - 0x10000);
515 ut_asserteq(c, alloc_addr_b + 0x10000);
516 ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
517 0, 0, 0, 0);
518 d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000,
519 ram_end - alloc_addr_c - 0x10000);
520 ut_asserteq(d, alloc_addr_c + 0x10000);
521 ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
522 0, 0, 0, 0);
523
524 /* allocating anything else should fail */
525 e = lmb_alloc(&lmb, 1, 1);
526 ut_asserteq(e, 0);
527 ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
528 0, 0, 0, 0);
529
530 ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000);
531 ut_asserteq(ret, 0);
532
533 /* allocate at 3 points in free range */
534
535 d = lmb_alloc_addr(&lmb, ram_end - 4, 4);
536 ut_asserteq(d, ram_end - 4);
537 ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
538 d, 4, 0, 0);
539 ret = lmb_free(&lmb, d, 4);
540 ut_asserteq(ret, 0);
541 ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
542 0, 0, 0, 0);
543
544 d = lmb_alloc_addr(&lmb, ram_end - 128, 4);
545 ut_asserteq(d, ram_end - 128);
546 ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
547 d, 4, 0, 0);
548 ret = lmb_free(&lmb, d, 4);
549 ut_asserteq(ret, 0);
550 ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
551 0, 0, 0, 0);
552
553 d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4);
554 ut_asserteq(d, alloc_addr_c + 0x10000);
555 ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004,
556 0, 0, 0, 0);
557 ret = lmb_free(&lmb, d, 4);
558 ut_asserteq(ret, 0);
559 ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
560 0, 0, 0, 0);
561
562 /* allocate at the bottom */
563 ret = lmb_free(&lmb, a, alloc_addr_a - ram);
564 ut_asserteq(ret, 0);
565 ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000,
566 0, 0, 0, 0);
567 d = lmb_alloc_addr(&lmb, ram, 4);
568 ut_asserteq(d, ram);
569 ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4,
570 ram + 0x8000000, 0x10010000, 0, 0);
571
572 /* check that allocating outside memory fails */
573 if (ram_end != 0) {
574 ret = lmb_alloc_addr(&lmb, ram_end, 1);
575 ut_asserteq(ret, 0);
576 }
577 if (ram != 0) {
578 ret = lmb_alloc_addr(&lmb, ram - 1, 1);
579 ut_asserteq(ret, 0);
580 }
581
582 return 0;
583}
584
585static int lib_test_lmb_alloc_addr(struct unit_test_state *uts)
586{
587 int ret;
588
589 /* simulate 512 MiB RAM beginning at 1GiB */
590 ret = test_alloc_addr(uts, 0x40000000);
591 if (ret)
592 return ret;
593
594 /* simulate 512 MiB RAM beginning at 1.5GiB */
595 return test_alloc_addr(uts, 0xE0000000);
596}
Simon Glassb4c722a2023-10-01 19:15:21 -0600597LIB_TEST(lib_test_lmb_alloc_addr, 0);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100598
599/* Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between */
600static int test_get_unreserved_size(struct unit_test_state *uts,
601 const phys_addr_t ram)
602{
603 const phys_size_t ram_size = 0x20000000;
604 const phys_addr_t ram_end = ram + ram_size;
605 const phys_size_t alloc_addr_a = ram + 0x8000000;
606 const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
607 const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
608 struct lmb lmb;
609 long ret;
610 phys_size_t s;
611
612 /* check for overflow */
613 ut_assert(ram_end == 0 || ram_end > ram);
614
615 lmb_init(&lmb);
616
617 ret = lmb_add(&lmb, ram, ram_size);
618 ut_asserteq(ret, 0);
619
620 /* reserve 3 blocks */
621 ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
622 ut_asserteq(ret, 0);
623 ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
624 ut_asserteq(ret, 0);
625 ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
626 ut_asserteq(ret, 0);
627 ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
628 alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
629
630 /* check addresses in between blocks */
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100631 s = lmb_get_free_size(&lmb, ram);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100632 ut_asserteq(s, alloc_addr_a - ram);
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100633 s = lmb_get_free_size(&lmb, ram + 0x10000);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100634 ut_asserteq(s, alloc_addr_a - ram - 0x10000);
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100635 s = lmb_get_free_size(&lmb, alloc_addr_a - 4);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100636 ut_asserteq(s, 4);
637
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100638 s = lmb_get_free_size(&lmb, alloc_addr_a + 0x10000);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100639 ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100640 s = lmb_get_free_size(&lmb, alloc_addr_a + 0x20000);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100641 ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100642 s = lmb_get_free_size(&lmb, alloc_addr_b - 4);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100643 ut_asserteq(s, 4);
644
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100645 s = lmb_get_free_size(&lmb, alloc_addr_c + 0x10000);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100646 ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100647 s = lmb_get_free_size(&lmb, alloc_addr_c + 0x20000);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100648 ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100649 s = lmb_get_free_size(&lmb, ram_end - 4);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100650 ut_asserteq(s, 4);
651
652 return 0;
653}
654
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100655static int lib_test_lmb_get_free_size(struct unit_test_state *uts)
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100656{
657 int ret;
658
659 /* simulate 512 MiB RAM beginning at 1GiB */
660 ret = test_get_unreserved_size(uts, 0x40000000);
661 if (ret)
662 return ret;
663
664 /* simulate 512 MiB RAM beginning at 1.5GiB */
665 return test_get_unreserved_size(uts, 0xE0000000);
666}
Simon Glassb4c722a2023-10-01 19:15:21 -0600667LIB_TEST(lib_test_lmb_get_free_size, 0);
Patrick Delaunay1fe3adc2021-03-10 10:16:30 +0100668
#ifdef CONFIG_LMB_USE_MAX_REGIONS
/*
 * Fill all CONFIG_LMB_MAX_REGIONS slots of both the memory and the reserved
 * region lists and check that adding one more region to either list fails
 * without corrupting the existing entries.
 */
static int lib_test_lmb_max_regions(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x00000000;
	/*
	 * All of 32bit memory space will contain regions for this test, so
	 * we need to scale ram_size (which in this case is the size of the lmb
	 * region) to match.
	 */
	const phys_size_t ram_size = ((0xFFFFFFFF >> CONFIG_LMB_MAX_REGIONS)
			+ 1) * CONFIG_LMB_MAX_REGIONS;
	const phys_size_t blk_size = 0x10000;
	phys_addr_t offset;
	struct lmb lmb;
	int err, i;

	lmb_init(&lmb);

	ut_asserteq(lmb.memory.cnt, 0);
	ut_asserteq(lmb.memory.max, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);
	ut_asserteq(lmb.reserved.max, CONFIG_LMB_MAX_REGIONS);

	/* Add CONFIG_LMB_MAX_REGIONS disjoint memory regions */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
		offset = ram + 2 * i * ram_size;
		err = lmb_add(&lmb, offset, ram_size);
		ut_asserteq(err, 0);
	}
	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);

	/* adding a (CONFIG_LMB_MAX_REGIONS + 1)th memory region must fail */
	offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * ram_size;
	err = lmb_add(&lmb, offset, ram_size);
	ut_asserteq(err, -1);

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);

	/* reserve CONFIG_LMB_MAX_REGIONS disjoint regions */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
		offset = ram + 2 * i * blk_size;
		err = lmb_reserve(&lmb, offset, blk_size);
		ut_asserteq(err, 0);
	}

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);

	/* a (CONFIG_LMB_MAX_REGIONS + 1)th reserved region must fail too */
	offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * blk_size;
	err = lmb_reserve(&lmb, offset, blk_size);
	ut_asserteq(err, -1);

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);

	/* check that no region was moved or resized by the failures */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
		ut_asserteq(lmb.memory.region[i].base, ram + 2 * i * ram_size);

	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
		ut_asserteq(lmb.reserved.region[i].base, ram + 2 * i * blk_size);

	return 0;
}
LIB_TEST(lib_test_lmb_max_regions, 0);
#endif
Patrick Delaunay1fe3adc2021-03-10 10:16:30 +0100738
Patrick Delaunaya1860722021-05-07 14:50:32 +0200739static int lib_test_lmb_flags(struct unit_test_state *uts)
740{
741 const phys_addr_t ram = 0x40000000;
742 const phys_size_t ram_size = 0x20000000;
743 struct lmb lmb;
744 long ret;
745
746 lmb_init(&lmb);
747
748 ret = lmb_add(&lmb, ram, ram_size);
749 ut_asserteq(ret, 0);
750
751 /* reserve, same flag */
752 ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
753 ut_asserteq(ret, 0);
754 ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
755 0, 0, 0, 0);
756
757 /* reserve again, same flag */
758 ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
759 ut_asserteq(ret, 0);
760 ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
761 0, 0, 0, 0);
762
763 /* reserve again, new flag */
764 ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NONE);
765 ut_asserteq(ret, -1);
766 ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
767 0, 0, 0, 0);
768
769 ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
770
771 /* merge after */
772 ret = lmb_reserve_flags(&lmb, 0x40020000, 0x10000, LMB_NOMAP);
773 ut_asserteq(ret, 1);
774 ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x20000,
775 0, 0, 0, 0);
776
777 /* merge before */
778 ret = lmb_reserve_flags(&lmb, 0x40000000, 0x10000, LMB_NOMAP);
779 ut_asserteq(ret, 1);
780 ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x30000,
781 0, 0, 0, 0);
782
783 ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
784
785 ret = lmb_reserve_flags(&lmb, 0x40030000, 0x10000, LMB_NONE);
786 ut_asserteq(ret, 0);
787 ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
788 0x40030000, 0x10000, 0, 0);
789
790 ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
791 ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
792
793 /* test that old API use LMB_NONE */
794 ret = lmb_reserve(&lmb, 0x40040000, 0x10000);
795 ut_asserteq(ret, 1);
796 ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
797 0x40030000, 0x20000, 0, 0);
798
799 ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
800 ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
801
802 ret = lmb_reserve_flags(&lmb, 0x40070000, 0x10000, LMB_NOMAP);
803 ut_asserteq(ret, 0);
804 ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
805 0x40030000, 0x20000, 0x40070000, 0x10000);
806
807 ret = lmb_reserve_flags(&lmb, 0x40050000, 0x10000, LMB_NOMAP);
808 ut_asserteq(ret, 0);
809 ASSERT_LMB(&lmb, ram, ram_size, 4, 0x40000000, 0x30000,
810 0x40030000, 0x20000, 0x40050000, 0x10000);
811
812 /* merge with 2 adjacent regions */
813 ret = lmb_reserve_flags(&lmb, 0x40060000, 0x10000, LMB_NOMAP);
814 ut_asserteq(ret, 2);
815 ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
816 0x40030000, 0x20000, 0x40050000, 0x30000);
817
818 ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
819 ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
820 ut_asserteq(lmb_is_nomap(&lmb.reserved.region[2]), 1);
821
822 return 0;
823}
Simon Glassb4c722a2023-10-01 19:15:21 -0600824LIB_TEST(lib_test_lmb_flags, 0);