blob: 15c68ce3961b19a8b80308ca78b882b3443fb438 [file] [log] [blame]
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +01001// SPDX-License-Identifier: GPL-2.0+
2/*
3 * (C) Copyright 2018 Simon Goldschmidt
4 */
5
6#include <common.h>
Simon Glass75c4d412020-07-19 10:15:37 -06007#include <dm.h>
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +01008#include <lmb.h>
Simon Glass0f2af882020-05-10 11:40:05 -06009#include <log.h>
Simon Glass9bc15642020-02-03 07:36:16 -070010#include <malloc.h>
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +010011#include <dm/test.h>
Simon Glass75c4d412020-07-19 10:15:37 -060012#include <test/test.h>
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +010013#include <test/ut.h>
14
Heinrich Schuchardta88181e2021-11-14 08:41:07 +010015static inline bool lmb_is_nomap(struct lmb_property *m)
16{
17 return m->flags & LMB_NOMAP;
18}
19
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +010020static int check_lmb(struct unit_test_state *uts, struct lmb *lmb,
21 phys_addr_t ram_base, phys_size_t ram_size,
22 unsigned long num_reserved,
23 phys_addr_t base1, phys_size_t size1,
24 phys_addr_t base2, phys_size_t size2,
25 phys_addr_t base3, phys_size_t size3)
26{
Simon Goldschmidtc722dac2019-02-01 21:23:59 +010027 if (ram_size) {
28 ut_asserteq(lmb->memory.cnt, 1);
29 ut_asserteq(lmb->memory.region[0].base, ram_base);
30 ut_asserteq(lmb->memory.region[0].size, ram_size);
31 }
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +010032
33 ut_asserteq(lmb->reserved.cnt, num_reserved);
34 if (num_reserved > 0) {
35 ut_asserteq(lmb->reserved.region[0].base, base1);
36 ut_asserteq(lmb->reserved.region[0].size, size1);
37 }
38 if (num_reserved > 1) {
39 ut_asserteq(lmb->reserved.region[1].base, base2);
40 ut_asserteq(lmb->reserved.region[1].size, size2);
41 }
42 if (num_reserved > 2) {
43 ut_asserteq(lmb->reserved.region[2].base, base3);
44 ut_asserteq(lmb->reserved.region[2].size, size3);
45 }
46 return 0;
47}
48
/*
 * Convenience wrapper around check_lmb().  NOTE: expands a reference to a
 * local variable named 'uts', so it can only be used inside a unit test
 * function that declares 'struct unit_test_state *uts'.
 */
#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \
		   base2, size2, base3, size3) \
		   ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \
			     num_reserved, base1, size1, base2, size2, base3, \
			     size3))
54
/*
 * Test helper function that reserves 64 KiB somewhere in the simulated RAM and
 * then does some alloc + free tests.
 *
 * @uts:            unit test state
 * @ram:            base of the (main) simulated RAM bank
 * @ram_size:       size of the main RAM bank
 * @ram0:           base of an optional second, lower RAM bank
 * @ram0_size:      size of the second bank; 0 means "no second bank"
 * @alloc_64k_addr: address at which a 64 KiB region is reserved; must lie
 *                  at least 8 bytes inside [ram, ram + ram_size)
 *
 * Returns 0 on success (assert macros return early on failure).
 */
static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
			    const phys_size_t ram_size, const phys_addr_t ram0,
			    const phys_size_t ram0_size,
			    const phys_addr_t alloc_64k_addr)
{
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;

	struct lmb lmb;
	long ret;
	phys_addr_t a, a2, b, b2, c, d;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);
	ut_assert(alloc_64k_end > alloc_64k_addr);
	/* check input addresses + size */
	ut_assert(alloc_64k_addr >= ram + 8);
	ut_assert(alloc_64k_end <= ram_end - 8);

	lmb_init(&lmb);

	/* the lower bank (if any) is added first so it becomes region[0] */
	if (ram0_size) {
		ret = lmb_add(&lmb, ram0, ram0_size);
		ut_asserteq(ret, 0);
	}

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	/* reserve 64KiB somewhere */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate somewhere, should be at the end of RAM */
	a = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, ram_end - 4);
	ASSERT_LMB(&lmb, 0, 0, 2, alloc_64k_addr, 0x10000,
		   ram_end - 4, 4, 0, 0);
	/* alloc below end of reserved region -> below reserved region */
	b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, alloc_64k_addr - 4);
	/* the new block coalesces with the 64 KiB reservation above it */
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);

	/* 2nd time */
	c = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(c, ram_end - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
	d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(d, alloc_64k_addr - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);

	ret = lmb_free(&lmb, a, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	/* allocate again to ensure we get the same address */
	a2 = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, a2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
	ret = lmb_free(&lmb, a2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);

	/* freeing b splits the coalesced region back into three pieces */
	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);
	/* allocate again to ensure we get the same address */
	b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, b2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	ret = lmb_free(&lmb, b2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);

	ret = lmb_free(&lmb, c, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	/* back to just the original 64 KiB reservation */
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* the memory regions themselves must be untouched by alloc/free */
	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	return 0;
}
180
/* Run test_multi_alloc() with a single 512 MiB RAM bank starting at @ram */
static int test_multi_alloc_512mb(struct unit_test_state *uts,
				  const phys_addr_t ram)
{
	return test_multi_alloc(uts, ram, 0x20000000, 0, 0, ram + 0x10000000);
}
186
/* Run test_multi_alloc() with two 512 MiB RAM banks at @ram and @ram0 */
static int test_multi_alloc_512mb_x2(struct unit_test_state *uts,
				     const phys_addr_t ram,
				     const phys_addr_t ram0)
{
	return test_multi_alloc(uts, ram, 0x20000000, ram0, 0x20000000,
				ram + 0x10000000);
}
194
195/* Create a memory region with one reserved region and allocate */
196static int lib_test_lmb_simple(struct unit_test_state *uts)
197{
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100198 int ret;
199
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100200 /* simulate 512 MiB RAM beginning at 1GiB */
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100201 ret = test_multi_alloc_512mb(uts, 0x40000000);
202 if (ret)
203 return ret;
204
205 /* simulate 512 MiB RAM beginning at 1.5GiB */
206 return test_multi_alloc_512mb(uts, 0xE0000000);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100207}
208
Simon Glass974dccd2020-07-28 19:41:12 -0600209DM_TEST(lib_test_lmb_simple, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100210
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100211/* Create two memory regions with one reserved region and allocate */
212static int lib_test_lmb_simple_x2(struct unit_test_state *uts)
213{
214 int ret;
215
216 /* simulate 512 MiB RAM beginning at 2GiB and 1 GiB */
217 ret = test_multi_alloc_512mb_x2(uts, 0x80000000, 0x40000000);
218 if (ret)
219 return ret;
220
221 /* simulate 512 MiB RAM beginning at 3.5GiB and 1 GiB */
222 return test_multi_alloc_512mb_x2(uts, 0xE0000000, 0x40000000);
223}
224
Simon Glass974dccd2020-07-28 19:41:12 -0600225DM_TEST(lib_test_lmb_simple_x2, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100226
/* Simulate 512 MiB RAM, allocate some blocks that fit/don't fit */
static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_size_t big_block_size = 0x10000000;
	const phys_addr_t ram_end = ram + ram_size;
	/* reservation sits exactly in the middle of the 512 MiB bank */
	const phys_addr_t alloc_64k_addr = ram + 0x10000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 64KiB in the middle of RAM */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate a big block, should be below reserved */
	a = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(a, ram);
	/* the big block coalesces with the 64 KiB reservation above it */
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);
	/* allocate 2nd big block */
	/* This should fail, printing an error */
	b = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(b, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);

	ret = lmb_free(&lmb, a, big_block_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate too big block */
	/* This should fail, printing an error */
	a = lmb_alloc(&lmb, ram_size, 1);
	ut_asserteq(a, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	return 0;
}
278
279static int lib_test_lmb_big(struct unit_test_state *uts)
280{
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100281 int ret;
282
283 /* simulate 512 MiB RAM beginning at 1GiB */
284 ret = test_bigblock(uts, 0x40000000);
285 if (ret)
286 return ret;
287
288 /* simulate 512 MiB RAM beginning at 1.5GiB */
289 return test_bigblock(uts, 0xE0000000);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100290}
291
Simon Glass974dccd2020-07-28 19:41:12 -0600292DM_TEST(lib_test_lmb_big, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100293
/*
 * Simulate 512 MiB RAM, allocate a block without previous reservation.
 *
 * @uts:        unit test state
 * @ram:        base address of the simulated RAM
 * @alloc_size: size of each allocation request (may be unaligned)
 * @align:      alignment passed to the lmb alloc functions; must be a
 *              power of two (the rounding below relies on that)
 */
static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
			   const phys_addr_t alloc_size, const ulong align)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;
	/* round the request size up to the alignment (power-of-two trick) */
	const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
		~(align - 1);

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block */
	a = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(a != 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* allocate another block */
	b = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(b != 0);
	if (alloc_size == alloc_size_aligned) {
		/* aligned requests are adjacent and coalesce to one region */
		ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
			   0);
	} else {
		/* unaligned requests leave a gap, so two regions remain */
		ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size, ram + ram_size
			   - alloc_size_aligned, alloc_size, 0, 0);
	}
	/* and free them */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	ret = lmb_free(&lmb, a, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block with base*/
	b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
	ut_assert(a == b);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* and free it */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}
353
354static int lib_test_lmb_noreserved(struct unit_test_state *uts)
355{
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100356 int ret;
357
358 /* simulate 512 MiB RAM beginning at 1GiB */
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100359 ret = test_noreserved(uts, 0x40000000, 4, 1);
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100360 if (ret)
361 return ret;
362
363 /* simulate 512 MiB RAM beginning at 1.5GiB */
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100364 return test_noreserved(uts, 0xE0000000, 4, 1);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100365}
366
Simon Glass974dccd2020-07-28 19:41:12 -0600367DM_TEST(lib_test_lmb_noreserved, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100368
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100369static int lib_test_lmb_unaligned_size(struct unit_test_state *uts)
370{
371 int ret;
372
373 /* simulate 512 MiB RAM beginning at 1GiB */
374 ret = test_noreserved(uts, 0x40000000, 5, 8);
375 if (ret)
376 return ret;
377
378 /* simulate 512 MiB RAM beginning at 1.5GiB */
379 return test_noreserved(uts, 0xE0000000, 5, 8);
380}
381
Simon Glass974dccd2020-07-28 19:41:12 -0600382DM_TEST(lib_test_lmb_unaligned_size, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
/*
 * Simulate a RAM that starts at 0 and allocate down to address 0, which must
 * fail as '0' means failure for the lmb_alloc functions.
 */
static int lib_test_lmb_at_0(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* allocate nearly everything */
	a = lmb_alloc(&lmb, ram_size - 4, 1);
	/* allocation grows down, so only [0, 4) is left free */
	ut_asserteq(a, ram + 4);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* allocate the rest */
	/* This should fail as the allocated address would be 0 */
	b = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(b, 0);
	/* check that this was an error by checking lmb */
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* check that this was an error by freeing b */
	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, a, ram_size - 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}

DM_TEST(lib_test_lmb_at_0, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100426
/* Check that calling lmb_reserve with overlapping regions fails. */
static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* reserving an overlapping region should fail */
	ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* reserve a 3rd, non-adjacent region */
	ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
		   0x40030000, 0x10000, 0, 0);
	/* reserve the gap in between; this should coalesce all three into one */
	ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
	ut_assert(ret >= 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
		   0, 0, 0, 0);

	/* reserve below the existing region; it is added as a new first region */
	ret = lmb_reserve(&lmb, 0x40000000, 0x8000);
	ut_assert(ret >= 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x8000,
		   0x40010000, 0x30000, 0, 0);

	/* reserve a region that coalesces with the first and overlaps the second */
	ret = lmb_reserve(&lmb, 0x40008000, 0x10000);
	ut_assert(ret >= 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x40000,
		   0, 0, 0, 0);
	return 0;
}

DM_TEST(lib_test_lmb_overlapping_reserve,
	UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100476
/*
 * Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between.
 * Expect addresses outside the memory range to fail.
 *
 * @uts: unit test state
 * @ram: base address of the simulated 512 MiB RAM bank
 */
static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	/* three 64 KiB reservations at 1/4, 2/4 and 3/4 of the bank */
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b, c, d, e;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

	/* allocate blocks filling the gaps; each merges with its neighbours */
	a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
	b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000,
			   alloc_addr_b - alloc_addr_a - 0x10000);
	ut_asserteq(b, alloc_addr_a + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000,
		   alloc_addr_c, 0x10000, 0, 0);
	c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000,
			   alloc_addr_c - alloc_addr_b - 0x10000);
	ut_asserteq(c, alloc_addr_b + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000,
			   ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	/* allocating anything else should fail */
	e = lmb_alloc(&lmb, 1, 1);
	ut_asserteq(e, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(ret, 0);

	/* allocate at 3 points in free range */

	d = lmb_alloc_addr(&lmb, ram_end - 4, 4);
	ut_asserteq(d, ram_end - 4);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(&lmb, ram_end - 128, 4);
	ut_asserteq(d, ram_end - 128);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* allocation directly after the reserved area merges with it */
	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004,
		   0, 0, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* allocate at the bottom */
	ret = lmb_free(&lmb, a, alloc_addr_a - ram);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, ram, 4);
	ut_asserteq(d, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4,
		   ram + 0x8000000, 0x10010000, 0, 0);

	/* check that allocating outside memory fails */
	if (ram_end != 0) {
		ret = lmb_alloc_addr(&lmb, ram_end, 1);
		ut_asserteq(ret, 0);
	}
	if (ram != 0) {
		ret = lmb_alloc_addr(&lmb, ram - 1, 1);
		ut_asserteq(ret, 0);
	}

	return 0;
}
591
592static int lib_test_lmb_alloc_addr(struct unit_test_state *uts)
593{
594 int ret;
595
596 /* simulate 512 MiB RAM beginning at 1GiB */
597 ret = test_alloc_addr(uts, 0x40000000);
598 if (ret)
599 return ret;
600
601 /* simulate 512 MiB RAM beginning at 1.5GiB */
602 return test_alloc_addr(uts, 0xE0000000);
603}
604
Simon Glass974dccd2020-07-28 19:41:12 -0600605DM_TEST(lib_test_lmb_alloc_addr, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100606
/* Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between */
static int test_get_unreserved_size(struct unit_test_state *uts,
				    const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	/* three 64 KiB reservations at 1/4, 2/4 and 3/4 of the bank */
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_size_t s;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

	/* check addresses in between blocks */
	/* free space from RAM start up to the first reservation */
	s = lmb_get_free_size(&lmb, ram);
	ut_asserteq(s, alloc_addr_a - ram);
	s = lmb_get_free_size(&lmb, ram + 0x10000);
	ut_asserteq(s, alloc_addr_a - ram - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a - 4);
	ut_asserteq(s, 4);

	/* free space in the gap between the first and second reservation */
	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x10000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x20000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
	s = lmb_get_free_size(&lmb, alloc_addr_b - 4);
	ut_asserteq(s, 4);

	/* free space from the last reservation to the end of RAM */
	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x10000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x20000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
	s = lmb_get_free_size(&lmb, ram_end - 4);
	ut_asserteq(s, 4);

	return 0;
}
662
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100663static int lib_test_lmb_get_free_size(struct unit_test_state *uts)
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100664{
665 int ret;
666
667 /* simulate 512 MiB RAM beginning at 1GiB */
668 ret = test_get_unreserved_size(uts, 0x40000000);
669 if (ret)
670 return ret;
671
672 /* simulate 512 MiB RAM beginning at 1.5GiB */
673 return test_get_unreserved_size(uts, 0xE0000000);
674}
675
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100676DM_TEST(lib_test_lmb_get_free_size,
Simon Glass974dccd2020-07-28 19:41:12 -0600677 UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
Patrick Delaunay1fe3adc2021-03-10 10:16:30 +0100678
#ifdef CONFIG_LMB_USE_MAX_REGIONS
/*
 * Check that lmb_add() and lmb_reserve() fail gracefully (returning -1 and
 * leaving the lists unchanged) once all CONFIG_LMB_MAX_REGIONS slots of the
 * fixed region arrays are in use.
 */
static int lib_test_lmb_max_regions(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x00000000;
	/*
	 * All of 32bit memory space will contain regions for this test, so
	 * we need to scale ram_size (which in this case is the size of the lmb
	 * region) to match.
	 */
	const phys_size_t ram_size = ((0xFFFFFFFF >> CONFIG_LMB_MAX_REGIONS)
			+ 1) * CONFIG_LMB_MAX_REGIONS;
	const phys_size_t blk_size = 0x10000;
	phys_addr_t offset;
	struct lmb lmb;
	int ret, i;

	lmb_init(&lmb);

	ut_asserteq(lmb.memory.cnt, 0);
	ut_asserteq(lmb.memory.max, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);
	ut_asserteq(lmb.reserved.max, CONFIG_LMB_MAX_REGIONS);

	/* Add CONFIG_LMB_MAX_REGIONS disjoint memory regions */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
		offset = ram + 2 * i * ram_size;
		ret = lmb_add(&lmb, offset, ram_size);
		ut_asserteq(ret, 0);
	}
	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);

	/* error for the (CONFIG_LMB_MAX_REGIONS + 1) memory regions */
	offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * ram_size;
	ret = lmb_add(&lmb, offset, ram_size);
	ut_asserteq(ret, -1);

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, 0);

	/* reserve CONFIG_LMB_MAX_REGIONS disjoint regions */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
		offset = ram + 2 * i * blk_size;
		ret = lmb_reserve(&lmb, offset, blk_size);
		ut_asserteq(ret, 0);
	}

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);

	/* error for the (CONFIG_LMB_MAX_REGIONS + 1) reserved block */
	offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * blk_size;
	ret = lmb_reserve(&lmb, offset, blk_size);
	ut_asserteq(ret, -1);

	ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
	ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);

	/* check that the failed calls did not corrupt any region base */
	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
		ut_asserteq(lmb.memory.region[i].base, ram + 2 * i * ram_size);

	for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
		ut_asserteq(lmb.reserved.region[i].base, ram + 2 * i * blk_size);

	return 0;
}

/*
 * The registration must stay inside the #ifdef: with
 * CONFIG_LMB_USE_MAX_REGIONS disabled the test function above is not
 * compiled, and an unconditional DM_TEST() would reference an undefined
 * symbol and break the build.
 */
DM_TEST(lib_test_lmb_max_regions,
	UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
#endif
Patrick Delaunaya1860722021-05-07 14:50:32 +0200750
/*
 * Exercise lmb_reserve_flags(): reservations only merge when their flags
 * match, lmb_is_nomap() reports the LMB_NOMAP flag, and the positive return
 * value of lmb_reserve_flags()/lmb_reserve() is the number of regions merged.
 */
static int lib_test_lmb_flags(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve, same flag */
	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	/* reserve again, same flag */
	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	/* reserve again, new flag */
	/* fails: same range may not be reserved with a different flag */
	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NONE);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);

	/* merge after */
	ret = lmb_reserve_flags(&lmb, 0x40020000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x20000,
		   0, 0, 0, 0);

	/* merge before */
	ret = lmb_reserve_flags(&lmb, 0x40000000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x30000,
		   0, 0, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);

	/* an adjacent region with a different flag must NOT merge */
	ret = lmb_reserve_flags(&lmb, 0x40030000, 0x10000, LMB_NONE);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
		   0x40030000, 0x10000, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);

	/* test that old API use LMB_NONE */
	ret = lmb_reserve(&lmb, 0x40040000, 0x10000);
	ut_asserteq(ret, 1);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0, 0);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);

	ret = lmb_reserve_flags(&lmb, 0x40070000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40070000, 0x10000);

	ret = lmb_reserve_flags(&lmb, 0x40050000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 4, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40050000, 0x10000);

	/* merge with 2 adjacent regions */
	ret = lmb_reserve_flags(&lmb, 0x40060000, 0x10000, LMB_NOMAP);
	ut_asserteq(ret, 2);
	ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
		   0x40030000, 0x20000, 0x40050000, 0x30000);

	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[2]), 1);

	return 0;
}

DM_TEST(lib_test_lmb_flags,
	UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);