// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Simon Goldschmidt
 */

#include <common.h>
#include <lmb.h>
#include <dm/test.h>
#include <test/ut.h>

11static int check_lmb(struct unit_test_state *uts, struct lmb *lmb,
12 phys_addr_t ram_base, phys_size_t ram_size,
13 unsigned long num_reserved,
14 phys_addr_t base1, phys_size_t size1,
15 phys_addr_t base2, phys_size_t size2,
16 phys_addr_t base3, phys_size_t size3)
17{
Simon Goldschmidtc722dac2019-02-01 21:23:59 +010018 if (ram_size) {
19 ut_asserteq(lmb->memory.cnt, 1);
20 ut_asserteq(lmb->memory.region[0].base, ram_base);
21 ut_asserteq(lmb->memory.region[0].size, ram_size);
22 }
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +010023
24 ut_asserteq(lmb->reserved.cnt, num_reserved);
25 if (num_reserved > 0) {
26 ut_asserteq(lmb->reserved.region[0].base, base1);
27 ut_asserteq(lmb->reserved.region[0].size, size1);
28 }
29 if (num_reserved > 1) {
30 ut_asserteq(lmb->reserved.region[1].base, base2);
31 ut_asserteq(lmb->reserved.region[1].size, size2);
32 }
33 if (num_reserved > 2) {
34 ut_asserteq(lmb->reserved.region[2].base, base3);
35 ut_asserteq(lmb->reserved.region[2].size, size3);
36 }
37 return 0;
38}
39
/*
 * Assert that an lmb has the expected memory/reserved state; thin wrapper
 * around check_lmb().  NOTE: expands a reference to 'uts', which must be in
 * scope at every call site.
 */
#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \
		   base2, size2, base3, size3) \
	ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \
		  num_reserved, base1, size1, base2, size2, base3, \
		  size3))
45
/*
 * Test helper function that reserves 64 KiB somewhere in the simulated RAM and
 * then does some alloc + free tests.
 *
 * @ram/@ram_size describe the main bank; @ram0/@ram0_size optionally describe
 * a second, lower bank (skipped when ram0_size == 0).  @alloc_64k_addr is
 * where the 64 KiB region is reserved; it must leave at least 8 bytes of RAM
 * free on either side.  Returns 0 on success, non-zero test failure otherwise.
 */
static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
			    const phys_size_t ram_size, const phys_addr_t ram0,
			    const phys_size_t ram0_size,
			    const phys_addr_t alloc_64k_addr)
{
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;

	struct lmb lmb;
	long ret;
	phys_addr_t a, a2, b, b2, c, d;

	/* check for overflow (ram_end == 0 is RAM reaching the address top) */
	ut_assert(ram_end == 0 || ram_end > ram);
	ut_assert(alloc_64k_end > alloc_64k_addr);
	/* check input addresses + size */
	ut_assert(alloc_64k_addr >= ram + 8);
	ut_assert(alloc_64k_end <= ram_end - 8);

	lmb_init(&lmb);

	/* optionally add the second (lower) memory bank first */
	if (ram0_size) {
		ret = lmb_add(&lmb, ram0, ram0_size);
		ut_asserteq(ret, 0);
	}

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* verify the memory list directly (ASSERT_LMB only checks one bank) */
	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	/* reserve 64KiB somewhere */
	/* ram_size of 0 below skips the memory-list check in check_lmb() */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate somewhere, should be at the end of RAM */
	a = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, ram_end - 4);
	ASSERT_LMB(&lmb, 0, 0, 2, alloc_64k_addr, 0x10000,
		   ram_end - 4, 4, 0, 0);
	/* alloc below end of reserved region -> below reserved region */
	b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, alloc_64k_addr - 4);
	/* b merged with the 64 KiB reservation into one region */
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);

	/* 2nd time */
	c = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(c, ram_end - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
	d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(d, alloc_64k_addr - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);

	ret = lmb_free(&lmb, a, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	/* allocate again to ensure we get the same address */
	a2 = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, a2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
	ret = lmb_free(&lmb, a2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);

	/* freeing b splits its merged region back into three */
	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);
	/* allocate again to ensure we get the same address */
	b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, b2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	ret = lmb_free(&lmb, b2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);

	ret = lmb_free(&lmb, c, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* the memory list must be unchanged by the alloc/free cycles */
	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	return 0;
}
171
172static int test_multi_alloc_512mb(struct unit_test_state *uts,
173 const phys_addr_t ram)
174{
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100175 return test_multi_alloc(uts, ram, 0x20000000, 0, 0, ram + 0x10000000);
176}
177
178static int test_multi_alloc_512mb_x2(struct unit_test_state *uts,
179 const phys_addr_t ram,
180 const phys_addr_t ram0)
181{
182 return test_multi_alloc(uts, ram, 0x20000000, ram0, 0x20000000,
183 ram + 0x10000000);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100184}
185
186/* Create a memory region with one reserved region and allocate */
187static int lib_test_lmb_simple(struct unit_test_state *uts)
188{
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100189 int ret;
190
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100191 /* simulate 512 MiB RAM beginning at 1GiB */
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100192 ret = test_multi_alloc_512mb(uts, 0x40000000);
193 if (ret)
194 return ret;
195
196 /* simulate 512 MiB RAM beginning at 1.5GiB */
197 return test_multi_alloc_512mb(uts, 0xE0000000);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100198}
199
200DM_TEST(lib_test_lmb_simple, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
201
Simon Goldschmidtc722dac2019-02-01 21:23:59 +0100202/* Create two memory regions with one reserved region and allocate */
203static int lib_test_lmb_simple_x2(struct unit_test_state *uts)
204{
205 int ret;
206
207 /* simulate 512 MiB RAM beginning at 2GiB and 1 GiB */
208 ret = test_multi_alloc_512mb_x2(uts, 0x80000000, 0x40000000);
209 if (ret)
210 return ret;
211
212 /* simulate 512 MiB RAM beginning at 3.5GiB and 1 GiB */
213 return test_multi_alloc_512mb_x2(uts, 0xE0000000, 0x40000000);
214}
215
216DM_TEST(lib_test_lmb_simple_x2, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
217
/*
 * Simulate 512 MiB RAM, allocate some blocks that fit/don't fit.
 *
 * Reserves 64 KiB in the middle of RAM, then checks that a 256 MiB block
 * lands below the reservation, that a second 256 MiB block fails (only
 * ~256 MiB minus the reservation remains), and that a RAM-sized request
 * fails outright.  Returns 0 on success.
 */
static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_size_t big_block_size = 0x10000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_addr = ram + 0x10000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	/* check for overflow (ram_end == 0 is RAM reaching the address top) */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 64KiB in the middle of RAM */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate a big block, should be below reserved */
	a = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(a, ram);
	/* big block merges with the 64 KiB reservation directly above it */
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);
	/* allocate 2nd big block */
	/* This should fail, printing an error */
	b = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(b, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);

	ret = lmb_free(&lmb, a, big_block_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate too big block */
	/* This should fail, printing an error */
	a = lmb_alloc(&lmb, ram_size, 1);
	ut_asserteq(a, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	return 0;
}
269
270static int lib_test_lmb_big(struct unit_test_state *uts)
271{
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100272 int ret;
273
274 /* simulate 512 MiB RAM beginning at 1GiB */
275 ret = test_bigblock(uts, 0x40000000);
276 if (ret)
277 return ret;
278
279 /* simulate 512 MiB RAM beginning at 1.5GiB */
280 return test_bigblock(uts, 0xE0000000);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100281}
282
283DM_TEST(lib_test_lmb_big, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
284
/*
 * Simulate 512 MiB RAM, allocate a block without previous reservation.
 *
 * @alloc_size need not be a multiple of @align: allocations are placed at
 * aligned addresses, so an unaligned size leaves a gap and two adjacent
 * allocations do not merge into one reserved region.  Returns 0 on success.
 */
static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
			   const phys_addr_t alloc_size, const ulong align)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;
	/* round alloc_size up to the next multiple of align */
	const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
		~(align - 1);

	/* check for overflow (ram_end == 0 is RAM reaching the address top) */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block; expected at the (aligned) top of RAM */
	a = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(a != 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* allocate another block */
	b = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(b != 0);
	if (alloc_size == alloc_size_aligned) {
		/* blocks are adjacent and merge into one reserved region */
		ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
			   0);
	} else {
		/* alignment padding keeps the two regions separate */
		ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size, ram + ram_size
			   - alloc_size_aligned, alloc_size, 0, 0);
	}
	/* and free them */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	ret = lmb_free(&lmb, a, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block with base; must land where 'a' was */
	b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
	ut_assert(a == b);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* and free it */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}
344
345static int lib_test_lmb_noreserved(struct unit_test_state *uts)
346{
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100347 int ret;
348
349 /* simulate 512 MiB RAM beginning at 1GiB */
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100350 ret = test_noreserved(uts, 0x40000000, 4, 1);
Simon Goldschmidt6402d9b2019-01-14 22:38:15 +0100351 if (ret)
352 return ret;
353
354 /* simulate 512 MiB RAM beginning at 1.5GiB */
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100355 return test_noreserved(uts, 0xE0000000, 4, 1);
Simon Goldschmidt9f3b6272019-01-14 22:38:14 +0100356}
357
358DM_TEST(lib_test_lmb_noreserved, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
359
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100360static int lib_test_lmb_unaligned_size(struct unit_test_state *uts)
361{
362 int ret;
363
364 /* simulate 512 MiB RAM beginning at 1GiB */
365 ret = test_noreserved(uts, 0x40000000, 5, 8);
366 if (ret)
367 return ret;
368
369 /* simulate 512 MiB RAM beginning at 1.5GiB */
370 return test_noreserved(uts, 0xE0000000, 5, 8);
371}
372
373DM_TEST(lib_test_lmb_unaligned_size, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
/*
 * Simulate a RAM that starts at 0 and allocate down to address 0, which must
 * fail as '0' means failure for the lmb_alloc functions.
 */
static int lib_test_lmb_at_0(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* allocate nearly everything; only bytes 0..3 remain free */
	a = lmb_alloc(&lmb, ram_size - 4, 1);
	ut_asserteq(a, ram + 4);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* allocate the rest */
	/* This should fail as the allocated address would be 0 */
	b = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(b, 0);
	/* check that this was an error by checking lmb */
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* check that this was an error by freeing b */
	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, a, ram_size - 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}

DM_TEST(lib_test_lmb_at_0, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
Simon Goldschmidtcb57d132019-01-14 22:38:16 +0100417
/* Check that calling lmb_reserve with overlapping regions fails. */
static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* allocate overlapping region should fail */
	ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* allocate 3rd region */
	ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
		   0x40030000, 0x10000, 0, 0);
	/* allocate 2nd region; it is adjacent to both and merges all three */
	ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
	ut_assert(ret >= 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
		   0, 0, 0, 0);

	return 0;
}

DM_TEST(lib_test_lmb_overlapping_reserve,
	DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100456
/*
 * Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between.
 * Expect addresses outside the memory range to fail.
 *
 * The three reservations sit at 1/4, 2/4 and 3/4 of the bank; the gaps are
 * then filled with lmb_alloc_addr() (fixed-address allocation) until the
 * whole bank is one reserved region.  A return of 0 from the alloc functions
 * means failure.
 */
static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b, c, d, e;

	/* check for overflow (ram_end == 0 is RAM reaching the address top) */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

	/* allocate blocks; each fill merges with the neighbouring region */
	a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
	b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000,
			   alloc_addr_b - alloc_addr_a - 0x10000);
	ut_asserteq(b, alloc_addr_a + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000,
		   alloc_addr_c, 0x10000, 0, 0);
	c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000,
			   alloc_addr_c - alloc_addr_b - 0x10000);
	ut_asserteq(c, alloc_addr_b + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000,
			   ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	/* allocating anything else should fail */
	e = lmb_alloc(&lmb, 1, 1);
	ut_asserteq(e, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	/* free the topmost region again */
	ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(ret, 0);

	/* allocate at 3 points in free range */

	/* at the very top of RAM */
	d = lmb_alloc_addr(&lmb, ram_end - 4, 4);
	ut_asserteq(d, ram_end - 4);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* in the middle of the free range */
	d = lmb_alloc_addr(&lmb, ram_end - 128, 4);
	ut_asserteq(d, ram_end - 128);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* directly above the existing reservation (merges with it) */
	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004,
		   0, 0, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* allocate at the bottom */
	ret = lmb_free(&lmb, a, alloc_addr_a - ram);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, ram, 4);
	ut_asserteq(d, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4,
		   ram + 0x8000000, 0x10010000, 0, 0);

	/* check that allocating outside memory fails (returns 0) */
	if (ram_end != 0) {
		ret = lmb_alloc_addr(&lmb, ram_end, 1);
		ut_asserteq(ret, 0);
	}
	if (ram != 0) {
		ret = lmb_alloc_addr(&lmb, ram - 1, 1);
		ut_asserteq(ret, 0);
	}

	return 0;
}
571
572static int lib_test_lmb_alloc_addr(struct unit_test_state *uts)
573{
574 int ret;
575
576 /* simulate 512 MiB RAM beginning at 1GiB */
577 ret = test_alloc_addr(uts, 0x40000000);
578 if (ret)
579 return ret;
580
581 /* simulate 512 MiB RAM beginning at 1.5GiB */
582 return test_alloc_addr(uts, 0xE0000000);
583}
584
585DM_TEST(lib_test_lmb_alloc_addr, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
586
/*
 * Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between.
 *
 * lmb_get_free_size() is queried at addresses before, inside-adjacent and
 * just below each gap between the reservations; the returned size must be
 * the distance from the query address to the next reserved region (or to
 * the end of RAM for the last gap).
 */
static int test_get_unreserved_size(struct unit_test_state *uts,
				    const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_size_t alloc_addr_a = ram + 0x8000000;
	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_size_t s;

	/* check for overflow (ram_end == 0 is RAM reaching the address top) */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

	/* check addresses in between blocks */
	s = lmb_get_free_size(&lmb, ram);
	ut_asserteq(s, alloc_addr_a - ram);
	s = lmb_get_free_size(&lmb, ram + 0x10000);
	ut_asserteq(s, alloc_addr_a - ram - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a - 4);
	ut_asserteq(s, 4);

	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x10000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x20000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
	s = lmb_get_free_size(&lmb, alloc_addr_b - 4);
	ut_asserteq(s, 4);

	/* last gap runs up to the end of RAM */
	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x10000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x20000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
	s = lmb_get_free_size(&lmb, ram_end - 4);
	ut_asserteq(s, 4);

	return 0;
}
642
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100643static int lib_test_lmb_get_free_size(struct unit_test_state *uts)
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100644{
645 int ret;
646
647 /* simulate 512 MiB RAM beginning at 1GiB */
648 ret = test_get_unreserved_size(uts, 0x40000000);
649 if (ret)
650 return ret;
651
652 /* simulate 512 MiB RAM beginning at 1.5GiB */
653 return test_get_unreserved_size(uts, 0xE0000000);
654}
655
Simon Goldschmidt7510a562019-01-21 20:29:55 +0100656DM_TEST(lib_test_lmb_get_free_size,
Simon Goldschmidt7a6ee462019-01-14 22:38:18 +0100657 DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);