// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Simon Goldschmidt
 */

#include <common.h>
#include <dm.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
#include <dm/test.h>
#include <test/lib.h>
#include <test/test.h>
#include <test/ut.h>

static inline bool lmb_is_nomap(struct lmb_property *m)
{
	return m->flags & LMB_NOMAP;
}

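/*
 * Check that the LMB contains the expected memory region (when ram_size is
 * non-zero) and the expected reserved regions (up to three of them).
 */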
static int check_lmb(struct unit_test_state *uts, struct lmb *lmb,
		     phys_addr_t ram_base, phys_size_t ram_size,
		     unsigned long num_reserved,
		     phys_addr_t base1, phys_size_t size1,
		     phys_addr_t base2, phys_size_t size2,
		     phys_addr_t base3, phys_size_t size3)
{
	if (ram_size) {
		ut_asserteq(lmb->memory.cnt, 1);
		ut_asserteq(lmb->memory.region[0].base, ram_base);
		ut_asserteq(lmb->memory.region[0].size, ram_size);
	}

	ut_asserteq(lmb->reserved.cnt, num_reserved);
	if (num_reserved > 0) {
		ut_asserteq(lmb->reserved.region[0].base, base1);
		ut_asserteq(lmb->reserved.region[0].size, size1);
	}
	if (num_reserved > 1) {
		ut_asserteq(lmb->reserved.region[1].base, base2);
		ut_asserteq(lmb->reserved.region[1].size, size2);
	}
	if (num_reserved > 2) {
		ut_asserteq(lmb->reserved.region[2].base, base3);
		ut_asserteq(lmb->reserved.region[2].size, size3);
	}
	return 0;
}

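/* Assert that the LMB matches the given memory region and reservations */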
#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \
		   base2, size2, base3, size3) \
	ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \
		  num_reserved, base1, size1, base2, size2, base3, \
		  size3))

/*
 * Test helper function that reserves 64 KiB somewhere in the simulated RAM and
 * then does some alloc + free tests.
 */
static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
			    const phys_size_t ram_size, const phys_addr_t ram0,
			    const phys_size_t ram0_size,
			    const phys_addr_t alloc_64k_addr)
{
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;

	struct lmb lmb;
	long ret;
	phys_addr_t a, a2, b, b2, c, d;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);
	ut_assert(alloc_64k_end > alloc_64k_addr);
	/* check input addresses + size */
	ut_assert(alloc_64k_addr >= ram + 8);
	ut_assert(alloc_64k_end <= ram_end - 8);

	lmb_init(&lmb);

	if (ram0_size) {
		ret = lmb_add(&lmb, ram0, ram0_size);
		ut_asserteq(ret, 0);
	}

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	/* reserve 64KiB somewhere */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate somewhere, should be at the end of RAM */
	a = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, ram_end - 4);
	ASSERT_LMB(&lmb, 0, 0, 2, alloc_64k_addr, 0x10000,
		   ram_end - 4, 4, 0, 0);
	/* alloc below end of reserved region -> below reserved region */
	b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, alloc_64k_addr - 4);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);

	/* 2nd time */
	c = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(c, ram_end - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
	d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(d, alloc_64k_addr - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);

	ret = lmb_free(&lmb, a, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	/* allocate again to ensure we get the same address */
	a2 = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, a2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
	ret = lmb_free(&lmb, a2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);

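	/* freeing 'b' splits the merged region, leaving three reserved regions */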
	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);
	/* allocate again to ensure we get the same address */
	b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, b2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	ret = lmb_free(&lmb, b2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);

	ret = lmb_free(&lmb, c, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	return 0;
}

static int test_multi_alloc_512mb(struct unit_test_state *uts,
				  const phys_addr_t ram)
{
	return test_multi_alloc(uts, ram, 0x20000000, 0, 0, ram + 0x10000000);
}

static int test_multi_alloc_512mb_x2(struct unit_test_state *uts,
				     const phys_addr_t ram,
				     const phys_addr_t ram0)
{
	return test_multi_alloc(uts, ram, 0x20000000, ram0, 0x20000000,
				ram + 0x10000000);
}

/* Create a memory region with one reserved region and allocate */
static int lib_test_lmb_simple(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_multi_alloc_512mb(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5GiB */
	return test_multi_alloc_512mb(uts, 0xE0000000);
}
LIB_TEST(lib_test_lmb_simple, 0);

/* Create two memory regions with one reserved region and allocate */
static int lib_test_lmb_simple_x2(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 2 GiB and 1 GiB */
	ret = test_multi_alloc_512mb_x2(uts, 0x80000000, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5 GiB and 1 GiB */
	return test_multi_alloc_512mb_x2(uts, 0xE0000000, 0x40000000);
}
LIB_TEST(lib_test_lmb_simple_x2, 0);

/* Simulate 512 MiB RAM, allocate some blocks that fit/don't fit */
static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_size_t big_block_size = 0x10000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_addr = ram + 0x10000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 64KiB in the middle of RAM */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate a big block, should be below reserved */
	a = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);
	/* allocate a 2nd big block; this should fail, printing an error */
	b = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(b, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);

	ret = lmb_free(&lmb, a, big_block_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate a block that is too big; this should fail, printing an error */
	a = lmb_alloc(&lmb, ram_size, 1);
	ut_asserteq(a, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	return 0;
}

static int lib_test_lmb_big(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_bigblock(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5GiB */
	return test_bigblock(uts, 0xE0000000);
}
LIB_TEST(lib_test_lmb_big, 0);

/* Simulate 512 MiB RAM, allocate a block without previous reservation */
static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
			   const phys_addr_t alloc_size, const ulong align)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;
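	/* the requested size rounded up to a multiple of align */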
	const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
		~(align - 1);

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block */
	a = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(a != 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* allocate another block */
	b = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(b != 0);
	if (alloc_size == alloc_size_aligned) {
		ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
			   0);
	} else {
		ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size, ram + ram_size
			   - alloc_size_aligned, alloc_size, 0, 0);
	}
	/* and free them */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	ret = lmb_free(&lmb, a, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block with base */
	b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
	ut_assert(a == b);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* and free it */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}

static int lib_test_lmb_noreserved(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_noreserved(uts, 0x40000000, 4, 1);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5GiB */
	return test_noreserved(uts, 0xE0000000, 4, 1);
}
LIB_TEST(lib_test_lmb_noreserved, 0);

368{
369 int ret;
370
371 /* simulate 512 MiB RAM beginning at 1GiB */
372 ret = test_noreserved(uts, 0x40000000, 5, 8);
373 if (ret)
374 return ret;
375
376 /* simulate 512 MiB RAM beginning at 1.5GiB */
377 return test_noreserved(uts, 0xE0000000, 5, 8);
378}
Simon Glassb4c722a2023-10-01 19:15:21 -0600379LIB_TEST(lib_test_lmb_unaligned_size, 0);
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100380
/*
 * Simulate a RAM that starts at 0 and allocate down to address 0, which must
 * fail as '0' means failure for the lmb_alloc functions.
 */
static int lib_test_lmb_at_0(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* allocate nearly everything */
	a = lmb_alloc(&lmb, ram_size - 4, 1);
	ut_asserteq(a, ram + 4);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* allocate the rest; this should fail as the allocated address would be 0 */
	b = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(b, 0);
	/* check that this was an error by checking lmb */
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* check that this was an error by freeing b */
	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, a, ram_size - 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}
LIB_TEST(lib_test_lmb_at_0, 0);

/* Check that calling lmb_reserve with overlapping regions fails. */
static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* reserving an overlapping region should fail */
	ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* reserve a second, disjoint region */
	ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
		   0x40030000, 0x10000, 0, 0);
	/* reserve the gap in between; this should coalesce all regions into one */
	ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
	ut_assert(ret >= 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
		   0, 0, 0, 0);

	/* reserve below the existing region; it should be added as the first region */
	ret = lmb_reserve(&lmb, 0x40000000, 0x8000);
	ut_assert(ret >= 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x8000,
		   0x40010000, 0x30000, 0, 0);

	/* reserve a region that coalesces with the first and overlaps the second */
	ret = lmb_reserve(&lmb, 0x40008000, 0x10000);
	ut_assert(ret >= 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x40000,
		   0, 0, 0, 0);
	return 0;
}
LIB_TEST(lib_test_lmb_overlapping_reserve, 0);

/*
 * Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between.
 * Expect addresses outside the memory range to fail.
 * lmb_alloc_addr() returns the requested address on success and 0 on failure.
 */
static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b, c, d, e;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

	/* allocate blocks */
	a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
	b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000,
			   alloc_addr_b - alloc_addr_a - 0x10000);
	ut_asserteq(b, alloc_addr_a + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000,
		   alloc_addr_c, 0x10000, 0, 0);
	c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000,
			   alloc_addr_c - alloc_addr_b - 0x10000);
	ut_asserteq(c, alloc_addr_b + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000,
			   ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	/* allocating anything else should fail */
	e = lmb_alloc(&lmb, 1, 1);
	ut_asserteq(e, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(ret, 0);

	/* allocate at 3 points in the free range */

	d = lmb_alloc_addr(&lmb, ram_end - 4, 4);
	ut_asserteq(d, ram_end - 4);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(&lmb, ram_end - 128, 4);
	ut_asserteq(d, ram_end - 128);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004,
		   0, 0, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* allocate at the bottom */
	ret = lmb_free(&lmb, a, alloc_addr_a - ram);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, ram, 4);
	ut_asserteq(d, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4,
		   ram + 0x8000000, 0x10010000, 0, 0);

	/* check that allocating outside memory fails */
	if (ram_end != 0) {
		ret = lmb_alloc_addr(&lmb, ram_end, 1);
		ut_asserteq(ret, 0);
	}
	if (ram != 0) {
		ret = lmb_alloc_addr(&lmb, ram - 1, 1);
		ut_asserteq(ret, 0);
	}

	return 0;
}

static int lib_test_lmb_alloc_addr(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_alloc_addr(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5GiB */
	return test_alloc_addr(uts, 0xE0000000);
}
LIB_TEST(lib_test_lmb_alloc_addr, 0);

/* Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between */
static int test_get_unreserved_size(struct unit_test_state *uts,
				    const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_size_t s;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

	/* check addresses in between blocks */
	s = lmb_get_free_size(&lmb, ram);
	ut_asserteq(s, alloc_addr_a - ram);
	s = lmb_get_free_size(&lmb, ram + 0x10000);
	ut_asserteq(s, alloc_addr_a - ram - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a - 4);
	ut_asserteq(s, 4);

	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x10000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x20000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
	s = lmb_get_free_size(&lmb, alloc_addr_b - 4);
	ut_asserteq(s, 4);

	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x10000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x20000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
	s = lmb_get_free_size(&lmb, ram_end - 4);
	ut_asserteq(s, 4);

	return 0;
}

static int lib_test_lmb_get_free_size(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1GiB */
	ret = test_get_unreserved_size(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 1.5GiB */
	return test_get_unreserved_size(uts, 0xE0000000);
}
LIB_TEST(lib_test_lmb_get_free_size, 0);

#ifdef CONFIG_LMB_USE_MAX_REGIONS
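/*
 * Check that no more than CONFIG_LMB_MAX_REGIONS memory/reserved regions can
 * be added, and that exceeding the limit returns -1 and leaves the existing
 * regions intact.
 */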
static int lib_test_lmb_max_regions(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x00000000;
	/*
	 * All of 32bit memory space will contain regions for this test, so
	 * we need to scale ram_size (which in this case is the size of the lmb
	 * region) to match.
	 */
	const phys_size_t ram_size = ((0xFFFFFFFF >> CONFIG_LMB_MAX_REGIONS)
			+ 1) * CONFIG_LMB_MAX_REGIONS;
	const phys_size_t blk_size = 0x10000;
	phys_addr_t offset;
	struct lmb lmb;
	int ret, i;

	lmb_init(&lmb);

	ut_asserteq(lmb.memory.cnt, 0);
	ut_asserteq(lmb.memory.max, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);
	ut_asserteq(lmb.reserved.max, CONFIG_LMB_MAX_REGIONS);

	/* Add CONFIG_LMB_MAX_REGIONS memory regions */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
		offset = ram + 2 * i * ram_size;
		ret = lmb_add(&lmb, offset, ram_size);
		ut_asserteq(ret, 0);
	}
	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);

	/* error for the (CONFIG_LMB_MAX_REGIONS + 1) memory regions */
	offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * ram_size;
	ret = lmb_add(&lmb, offset, ram_size);
	ut_asserteq(ret, -1);

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);

	/* reserve CONFIG_LMB_MAX_REGIONS regions */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
		offset = ram + 2 * i * blk_size;
		ret = lmb_reserve(&lmb, offset, blk_size);
		ut_asserteq(ret, 0);
	}

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);

	/* error for the (CONFIG_LMB_MAX_REGIONS + 1) reserved blocks */
	offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * blk_size;
	ret = lmb_reserve(&lmb, offset, blk_size);
	ut_asserteq(ret, -1);

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);

	/* check each region */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
		ut_asserteq(lmb.memory.region[i].base, ram + 2 * i * ram_size);

	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
		ut_asserteq(lmb.reserved.region[i].base, ram + 2 * i * blk_size);

	return 0;
}
LIB_TEST(lib_test_lmb_max_regions, 0);
#endif

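/*
 * Check lmb_reserve_flags(): reservations with matching flags may repeat or
 * merge, while a reservation with a different flag on the same range fails.
 */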
static int lib_test_lmb_flags(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve, same flag */
	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	/* reserve again, same flag */
	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	/* reserve again, new flag */
	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NONE);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);

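	/*
	 * As the assertions below suggest, a successful lmb_reserve_flags()
	 * returns the number of existing regions the reservation merged with.
	 */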
	/* merge after */
	ret = lmb_reserve_flags(&lmb, 0x40020000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x20000,
		   0, 0, 0, 0);

	/* merge before */
	ret = lmb_reserve_flags(&lmb, 0x40000000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x30000,
		   0, 0, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);

	ret = lmb_reserve_flags(&lmb, 0x40030000, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
		   0x40030000, 0x10000, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);

	/* test that the old API uses LMB_NONE */
	ret = lmb_reserve(&lmb, 0x40040000, 0x10000);
	ut_asserteq(ret, 1);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);

	ret = lmb_reserve_flags(&lmb, 0x40070000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40070000, 0x10000);

	ret = lmb_reserve_flags(&lmb, 0x40050000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 4, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40050000, 0x10000);

	/* merge with 2 adjacent regions */
	ret = lmb_reserve_flags(&lmb, 0x40060000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 2);
	ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40050000, 0x30000);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[2]), 1);

	return 0;
}
LIB_TEST(lib_test_lmb_flags, 0);