// SPDX-License-Identifier: GPL-2.0+
/*
 * This code is based on a version (aka dlmalloc) of malloc/free/realloc written
 * by Doug Lea and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 *
 * The original code is available at http://gee.cs.oswego.edu/pub/misc/
 * as file malloc-2.6.6.c.
 */

#include <common.h>
#include <log.h>
#include <asm/global_data.h>

#if CONFIG_IS_ENABLED(UNIT_TEST)
#define DEBUG
#endif

#include <malloc.h>
#include <asm/io.h>
#include <valgrind/memcheck.h>

#ifdef DEBUG
#if __STD_C
static void malloc_update_mallinfo (void);
void malloc_stats (void);
#else
static void malloc_update_mallinfo ();
void malloc_stats();
#endif
#endif	/* DEBUG */

DECLARE_GLOBAL_DATA_PTR;

/*
  Emulation of sbrk for WIN32
  All code within the ifdef WIN32 is untested by me.

  Thanks to Martin Fong and others for supplying this.
*/


#ifdef WIN32

#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
~(malloc_getpagesize-1))
#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1))

/* reserve 64MB to ensure large contiguous space */
#define RESERVED_SIZE (1024*1024*64)
#define NEXT_SIZE (2048*1024)
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)

struct GmListElement;
typedef struct GmListElement GmListElement;

struct GmListElement
{
	GmListElement* next;
	void* base;
};

static GmListElement* head = 0;
static unsigned int gNextAddress = 0;
static unsigned int gAddressBase = 0;
static unsigned int gAllocatedSize = 0;

static
GmListElement* makeGmListElement (void* bas)
{
	GmListElement* this;
	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
	assert (this);
	if (this)
	{
		this->base = bas;
		this->next = head;
		head = this;
	}
	return this;
}

void gcleanup ()
{
	BOOL rval;
	assert ( (head == NULL) || (head->base == (void*)gAddressBase));
	if (gAddressBase && (gNextAddress - gAddressBase))
	{
		rval = VirtualFree ((void*)gAddressBase,
							gNextAddress - gAddressBase,
							MEM_DECOMMIT);
		assert (rval);
	}
	while (head)
	{
		GmListElement* next = head->next;
		rval = VirtualFree (head->base, 0, MEM_RELEASE);
		assert (rval);
		LocalFree (head);
		head = next;
	}
}

static
void* findRegion (void* start_address, unsigned long size)
{
	MEMORY_BASIC_INFORMATION info;
	if (size >= TOP_MEMORY) return NULL;

	while ((unsigned long)start_address + size < TOP_MEMORY)
	{
		VirtualQuery (start_address, &info, sizeof (info));
		if ((info.State == MEM_FREE) && (info.RegionSize >= size))
			return start_address;
		else
		{
			/* Requested region is not available so see if the */
			/* next region is available.  Set 'start_address' */
			/* to the next region and call 'VirtualQuery()' */
			/* again. */

			start_address = (char*)info.BaseAddress + info.RegionSize;

			/* Make sure we start looking for the next region */
			/* on the *next* 64K boundary.  Otherwise, even if */
			/* the new region is free according to */
			/* 'VirtualQuery()', the subsequent call to */
			/* 'VirtualAlloc()' (which follows the call to */
			/* this routine in 'wsbrk()') will round *down* */
			/* the requested address to a 64K boundary which */
			/* we already know is an address in the */
			/* unavailable region.  Thus, the subsequent call */
			/* to 'VirtualAlloc()' will fail and bring us back */
			/* here, causing us to go into an infinite loop. */

			start_address =
				(void *) AlignPage64K((unsigned long) start_address);
		}
	}
	return NULL;

}


void* wsbrk (long size)
{
	void* tmp;
	if (size > 0)
	{
		if (gAddressBase == 0)
		{
			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
			gNextAddress = gAddressBase =
				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
											MEM_RESERVE, PAGE_NOACCESS);
		} else if (AlignPage (gNextAddress + size) > (gAddressBase +
gAllocatedSize))
		{
			long new_size = max (NEXT_SIZE, AlignPage (size));
			void* new_address = (void*)(gAddressBase+gAllocatedSize);
			do
			{
				new_address = findRegion (new_address, new_size);

				if (!new_address)
					return (void*)-1;

				gAddressBase = gNextAddress =
					(unsigned int)VirtualAlloc (new_address, new_size,
												MEM_RESERVE, PAGE_NOACCESS);
				/* repeat in case of race condition */
				/* The region that we found has been snagged */
				/* by another thread */
			}
			while (gAddressBase == 0);

			assert (new_address == (void*)gAddressBase);

			gAllocatedSize = new_size;

			if (!makeGmListElement ((void*)gAddressBase))
				return (void*)-1;
		}
		if ((size + gNextAddress) > AlignPage (gNextAddress))
		{
			void* res;
			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
								(size + gNextAddress -
								 AlignPage (gNextAddress)),
								MEM_COMMIT, PAGE_READWRITE);
			if (!res)
				return (void*)-1;
		}
		tmp = (void*)gNextAddress;
		gNextAddress = (unsigned int)tmp + size;
		return tmp;
	}
	else if (size < 0)
	{
		unsigned int alignedGoal = AlignPage (gNextAddress + size);
		/* Trim by releasing the virtual memory */
		if (alignedGoal >= gAddressBase)
		{
			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
						 MEM_DECOMMIT);
			gNextAddress = gNextAddress + size;
			return (void*)gNextAddress;
		}
		else
		{
			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
						 MEM_DECOMMIT);
			gNextAddress = gAddressBase;
			return (void*)-1;
		}
	}
	else
	{
		return (void*)gNextAddress;
	}
}

#endif



/*
  Type declarations
*/


struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
} __attribute__((__may_alias__)) ;

typedef struct malloc_chunk* mchunkptr;

/*

   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of previous chunk, if allocated            | |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             User data starts here...                          .
	    .                                                               .
	    .             (malloc_usable_space() bytes)                     .
	    .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of chunk                                     |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of previous chunk                            |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Forward pointer to next chunk in list             |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Back pointer to previous chunk in list            |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Unused space (may be 0 bytes long)                .
	    .                                                               .
	    .                                                               |

nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    (The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.)

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. (This makes it easier to
    deal with alignments etc).

    The two exceptions to all this are

     1. The special chunk `top', which doesn't bother using the
	trailing size field since there is no
	next contiguous chunk that would have to index off it. (After
	initialization, `top' is forced to always exist.  If it would
	become less than MINSIZE bytes long, it is replenished via
	malloc_extend_top.)

     2. Chunks allocated via mmap, which have the second-lowest-order
	bit (IS_MMAPPED) set in their size fields.  Because they are
	never merged or traversed from any other chunk, they have no
	foot size or inuse information.

    Available chunks are kept in any of several places (all declared below):

    * `av': An array of chunks serving as bin headers for consolidated
       chunks. Each bin is doubly linked.  The bins are approximately
       proportionally (log) spaced.  There are a lot of these bins
       (128). This may look excessive, but works very well in
       practice.  All procedures maintain the invariant that no
       consolidated chunk physically borders another one. Chunks in
       bins are kept in size order, with ties going to the
       approximately least recently used chunk.

       The chunks in each bin are maintained in decreasing sorted order by
       size.  This is irrelevant for the small bins, which all contain
       the same-sized chunks, but facilitates best-fit allocation for
       larger chunks. (These lists are just sequential. Keeping them in
       order almost never requires enough traversal to warrant using
       fancier ordered data structures.)  Chunks of the same size are
       linked with the most recently freed at the front, and allocations
       are taken from the back.  This results in LRU or FIFO allocation
       order, which tends to give each chunk an equal opportunity to be
       consolidated with adjacent freed chunks, resulting in larger free
       chunks and less fragmentation.

    * `top': The top-most available chunk (i.e., the one bordering the
       end of available memory) is treated specially. It is never
       included in any bin, is used only if no other chunk is
       available, and is released back to the system if it is very
       large (see M_TRIM_THRESHOLD).

    * `last_remainder': A bin holding only the remainder of the
       most recently split (non-top) chunk. This bin is checked
       before other non-fitting chunks, so as to provide better
       locality for runs of sequentially allocated chunks.

    *  Implicitly, through the host system's memory mapping tables.
       If supported, requests greater than a threshold are usually
       serviced via calls to mmap, and then later released via munmap.

*/

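/*
 * Illustration (not from the original source): a worked example of the
 * boundary-tag layout above, assuming 32-bit pointers and SIZE_SZ == 4.
 * For a chunk at address 0x1000 whose size field holds 0x41
 * (i.e. 64 | PREV_INUSE):
 *
 *   chunk2mem(chunk)  == 0x1000 + 2*SIZE_SZ == 0x1008  (pointer handed out)
 *   chunksize(chunk)  == 0x41 & ~SIZE_BITS  == 64      (flag bits masked)
 *   next chunk        == 0x1000 + 64        == 0x1040
 *   PREV_INUSE set    -> the chunk physically before this one is allocated,
 *                        so this chunk's prev_size word may hold user data.
 */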
/* sizes, alignments */

#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MINSIZE                (sizeof(struct malloc_chunk))

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* pad request bytes into a usable size */

#define request2size(req) \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))

/* Check if m has acceptable alignment */

#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)

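/*
 * Illustration (not from the original source): assuming a build where
 * SIZE_SZ == 4, so MALLOC_ALIGNMENT == 8 and MINSIZE == 16,
 * request2size() adds SIZE_SZ of header overhead, rounds up to a
 * multiple of MALLOC_ALIGNMENT, and enforces a floor of MINSIZE:
 *
 *   request2size(0)  == 16   (below the floor, so MINSIZE is used)
 *   request2size(10) == 16   ((10 + 4 + 7) & ~7)
 *   request2size(13) == 24   ((13 + 4 + 7) & ~7)
 *   request2size(24) == 32   ((24 + 4 + 7) & ~7)
 */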


/*
  Physical chunk operations
*/


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */

#define IS_MMAPPED 0x2

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)


/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p)\
   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))


/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))

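/*
 * Illustration (not from the original source): how the traversal macros
 * above behave for a hypothetical chunk p at 0x2000 with
 * p->size == 0x31 (48 | PREV_INUSE) and p->prev_size == 32:
 *
 *   next_chunk(p)         == (mchunkptr)(0x2000 + 48) == 0x2030
 *   chunk_at_offset(p, 8) == (mchunkptr)0x2008
 *   prev_chunk(p)         == (mchunkptr)(0x2000 - 32) == 0x1fe0,
 *                            but is only meaningful when PREV_INUSE is
 *                            clear, since prev_size is valid only then.
 */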


/*
  Dealing with use bits
*/

/* extract p's inuse bit */

#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* extract inuse bit of previous chunk */

#define prev_inuse(p)  ((p)->size & PREV_INUSE)

/* check for mmap()'ed chunk */

#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))

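/*
 * Illustration (not from the original source): note that inuse(p) never
 * looks at p itself; a chunk's in-use state lives in the PREV_INUSE bit
 * of the *next* physical chunk.  So for adjacent chunks p and q, with
 * q == next_chunk(p):
 *
 *   set_inuse(p)    sets   PREV_INUSE in q->size
 *   clear_inuse(p)  clears PREV_INUSE in q->size
 *   inuse(p)        reads  PREV_INUSE from q->size
 *
 * prev_inuse(p), by contrast, reads p's own size word, which records
 * the state of the chunk physically *before* p.
 */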


/*
  Dealing with size fields
*/

/* Get size, ignoring use bits */

#define chunksize(p)          ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s)        ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))



/*
   Bins

    The bins, `av_' are an array of pairs of pointers serving as the
    heads of (initially empty) doubly-linked lists of chunks, laid out
    in a way so that each pair can be treated as if it were in a
    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
    and chunks are the same).

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically
    spaced. (See the table below.) The `av_' array is never mentioned
    directly in the code, but instead via bin access macros.

    Bin layout:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.

    The special chunks `top' and `last_remainder' get their own bins,
    (this is implemented via yet more trickery with the av_ array),
    although `top' is never properly linked to its bin since it is
    always handled specially.

*/

#define NAV             128   /* number of bins */

typedef struct malloc_chunk* mbinptr;

/* access macros */

#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))

/*
   The first 2 bins are never indexed. The corresponding av_ cells are instead
   used for bookkeeping. This is not to save space, but to simplify
   indexing, maintain locality, and avoid some initialization tests.
*/

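/*
 * Illustration (not from the original source): bin_at() relies on the
 * pair layout described above.  On a build where pointers and
 * INTERNAL_SIZE_T have the same width, bin i occupies av_[2*i + 2] (fd)
 * and av_[2*i + 3] (bk); subtracting 2*SIZE_SZ makes the pair look like
 * a malloc_chunk whose fd/bk fields land on those cells, even though
 * the "chunk" has no real prev_size/size words.  For example, i == 1:
 *
 *   bin_at(1) == (mbinptr)((char *)&av_[4] - 2*SIZE_SZ)
 *   bin_at(1)->fd == av_[4],  bin_at(1)->bk == av_[5]
 *
 * which is exactly how last_remainder (bin 1) is addressed below.
 */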
#define top            (av_[2])          /* The topmost chunk */
#define last_remainder (bin_at(1))       /* remainder from last split */


/*
   Because top initially points to its own bin with initial
   zero size, thus forcing extension on the first malloc request,
   we avoid having any special code in malloc to check whether
   it even exists yet. But we still need to in malloc_extend_top.
*/

#define initial_top    ((mchunkptr)(bin_at(0)))

/* Helper macro to initialize bins */

#define IAV(i)  bin_at(i), bin_at(i)

static mbinptr av_[NAV * 2 + 2] = {
 NULL, NULL,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
};

#ifdef CONFIG_NEEDS_MANUAL_RELOC
static void malloc_bin_reloc(void)
{
	mbinptr *p = &av_[2];
	size_t i;

	for (i = 2; i < ARRAY_SIZE(av_); ++i, ++p)
		*p = (mbinptr)((ulong)*p + gd->reloc_off);
}
#else
static inline void malloc_bin_reloc(void) {}
#endif

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
static void malloc_init(void);
#endif

ulong mem_malloc_start = 0;
ulong mem_malloc_end = 0;
ulong mem_malloc_brk = 0;

static bool malloc_testing;	/* enable test mode */
static int malloc_max_allocs;	/* return NULL after this many calls to malloc() */

void *sbrk(ptrdiff_t increment)
{
	ulong old = mem_malloc_brk;
	ulong new = old + increment;

	/*
	 * if we are giving memory back make sure we clear it out since
	 * we set MORECORE_CLEARS to 1
	 */
	if (increment < 0)
		memset((void *)new, 0, -increment);

	if ((new < mem_malloc_start) || (new > mem_malloc_end))
		return (void *)MORECORE_FAILURE;

	mem_malloc_brk = new;

	return (void *)old;
}

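/*
 * Illustration (not from the original source): this sbrk() is the
 * MORECORE backend for the allocator.  Assuming, hypothetically, that
 * mem_malloc_init(0x1000, 0x1000) has run, so the break starts at
 * 0x1000 and the arena ends at 0x2000:
 *
 *   sbrk(0x100)  -> returns 0x1000, mem_malloc_brk becomes 0x1100
 *   sbrk(0)      -> returns 0x1100 (current break, nothing changes)
 *   sbrk(-0x80)  -> returns 0x1100, break shrinks to 0x1080 and the
 *                   released bytes are zeroed (MORECORE_CLEARS == 1)
 *   sbrk(0x1000) -> would pass mem_malloc_end, so it fails and
 *                   returns MORECORE_FAILURE
 */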
void mem_malloc_init(ulong start, ulong size)
{
	mem_malloc_start = start;
	mem_malloc_end = start + size;
	mem_malloc_brk = start;

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
	malloc_init();
#endif

	debug("using memory %#lx-%#lx for malloc()\n", mem_malloc_start,
	      mem_malloc_end);
#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
	memset((void *)mem_malloc_start, 0x0, size);
#endif
	malloc_bin_reloc();
}

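/*
 * Illustration (not from the original source): a minimal sketch of how
 * an init path might hand a RAM region to this allocator.  The
 * addresses below are made up; real callers derive them from the
 * relocation address and the configured malloc length.
 */
#if 0	/* example only, not compiled */
static void example_heap_setup(void)
{
	ulong heap_base = 0x84000000;	/* hypothetical free RAM */
	ulong heap_size = 0x01000000;	/* 16 MiB heap */

	mem_malloc_init(heap_base, heap_size);

	/* From here on, mALLOc()/fREe() operate inside that region */
}
#endif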
/* field-extraction macros */

#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/*
  Indexing into bins
*/

#define bin_index(sz)                                                          \
(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
 ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
					  126)
/*
  bins for chunks < 512 are all spaced 8 bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN         63
#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8

#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)

/*
   Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)

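/*
 * Illustration (not from the original source): worked examples of the
 * indexing above.
 *
 *   sz ==   48: small chunk, smallbin_index(48) == 48 >> 3 == 6
 *   sz == 1000: 1000 >> 9 == 1  (<= 4),  bin_index == 56 + (1000 >> 6)
 *               == 56 + 15 == 71
 *   sz == 8192: 8192 >> 9 == 16 (<= 20), bin_index == 91 + (8192 >> 9)
 *               == 91 + 16 == 107
 *
 * (The mapping has a little slop at the range boundaries, as the Bins
 * comment above notes; this is deliberate, for speed.)
 */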


/*
  To help compensate for the large number of bins, a one-level index
  structure is used for bin-by-bin searching.  `binblocks' is a
  one-word bitvector recording whether groups of BINBLOCKWIDTH bins
  have any (possibly) non-empty bins, so they can be skipped over
  all at once during traversals. The bits are NOT always
  cleared as soon as all bins in a block are empty, but instead only
  when all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

#define binblocks_r     ((INTERNAL_SIZE_T)av_[1]) /* bitvector of nonempty blocks */
#define binblocks_w     (av_[1])

/* bin<->block macros */

#define idx2binblock(ix)      ((unsigned)1 << (ix / BINBLOCKWIDTH))
#define mark_binblock(ii)     (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii)))
#define clear_binblock(ii)    (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii))))

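/*
 * Illustration (not from the original source): with BINBLOCKWIDTH == 4,
 * bit k of the binblocks bitvector covers bins 4k .. 4k+3.  For bin
 * index 71 (from the bin_index example above):
 *
 *   idx2binblock(71) == 1u << (71 / 4) == 1u << 17
 *
 * so mark_binblock(71) sets bit 17, telling the scan in malloc that one
 * of bins 68..71 may be nonempty, and clear_binblock(71) clears it once
 * the whole block is observed to be empty.
 */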



/* Other static bookkeeping data */

/* variables holding tunable values */

static unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
static unsigned long top_pad          = DEFAULT_TOP_PAD;
static unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
static unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;

/* The first value returned from sbrk */
static char* sbrk_base = (char*)(-1);

/* The maximum memory obtained from system via sbrk */
static unsigned long max_sbrked_mem = 0;

/* The maximum via either sbrk or mmap */
static unsigned long max_total_mem = 0;

/* internal working copy of mallinfo */
static struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/* The total memory obtained from system via sbrk */
#define sbrked_mem  (current_mallinfo.arena)

/* Tracking mmaps */

#ifdef DEBUG
static unsigned int n_mmaps = 0;
#endif	/* DEBUG */
static unsigned long mmapped_mem = 0;
#if HAVE_MMAP
static unsigned int max_n_mmaps = 0;
static unsigned long max_mmapped_mem = 0;
#endif

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
static void malloc_init(void)
{
	int i, j;

	debug("bins (av_ array) are at %p\n", (void *)av_);

	av_[0] = NULL; av_[1] = NULL;
	for (i = 2, j = 2; i < NAV * 2 + 2; i += 2, j++) {
		av_[i] = bin_at(j - 2);
		av_[i + 1] = bin_at(j - 2);

		/* Just print the first few bins so that
		 * we can see they are all right.
		 */
		if (i < 10)
			debug("av_[%d]=%lx av_[%d]=%lx\n",
			      i, (ulong)av_[i],
			      i + 1, (ulong)av_[i + 1]);
	}

	/* Init the static bookkeeping as well */
	sbrk_base = (char *)(-1);
	max_sbrked_mem = 0;
	max_total_mem = 0;
#ifdef DEBUG
	memset((void *)&current_mallinfo, 0, sizeof(struct mallinfo));
#endif
}
#endif

/*
  Debugging support
*/

#ifdef DEBUG


/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if __STD_C
static void do_check_chunk(mchunkptr p)
#else
static void do_check_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* No checkable chunk is mmapped */
  assert(!chunk_is_mmapped(p));

  /* Check for legal address ... */
  assert((char*)p >= sbrk_base);
  if (p != top)
    assert((char*)p + sz <= (char*)top);
  else
    assert((char*)p + sz <= sbrk_base + sbrked_mem);

}


#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  assert(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert (next == top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}

#if __STD_C
static void do_check_inuse_chunk(mchunkptr p)
#else
static void do_check_inuse_chunk(p) mchunkptr p;
#endif
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);

}

#if __STD_C
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = sz - s;

  do_check_inuse_chunk(p);

  /* Legal size ... */
  assert((long)sz >= (long)MINSIZE);
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert(room >= 0);
  assert(room < (long)MINSIZE);

  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));


  /* ... and was allocated at front of an available chunk */
  assert(prev_inuse(p));

}


#define check_free_chunk(P)  do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif


/*
  Macro-based internal utilities
*/


/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/


#define frontlink(P, S, IDX, BK, FD) \
{ \
  if (S < MAX_SMALLBIN_SIZE) \
  { \
    IDX = smallbin_index(S); \
    mark_binblock(IDX); \
    BK = bin_at(IDX); \
    FD = BK->fd; \
    P->bk = BK; \
    P->fd = FD; \
    FD->bk = BK->fd = P; \
  } \
  else \
  { \
    IDX = bin_index(S); \
    BK = bin_at(IDX); \
    FD = BK->fd; \
    if (FD == BK) mark_binblock(IDX); \
    else \
    { \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd; \
      BK = FD->bk; \
    } \
    P->bk = BK; \
    P->fd = FD; \
    FD->bk = BK->fd = P; \
  } \
}


/* take a chunk off a list */

#define unlink(P, BK, FD) \
{ \
  BK = P->bk; \
  FD = P->fd; \
  FD->bk = BK; \
  BK->fd = FD; \
} \

/* Place p as the last remainder */

#define link_last_remainder(P) \
{ \
  last_remainder->fd = last_remainder->bk = P; \
  P->fd = P->bk = last_remainder; \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)




/* Routines dealing with mmap(). */

#if HAVE_MMAP

#if __STD_C
static mchunkptr mmap_chunk(size_t size)
#else
static mchunkptr mmap_chunk(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;

#ifndef MAP_ANONYMOUS
  static int fd = -1;
#endif

  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */

  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
  size = (size + SIZE_SZ + page_mask) & ~page_mask;

#ifdef MAP_ANONYMOUS
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#else /* !MAP_ANONYMOUS */
  if (fd < 0)
  {
    fd = open("/dev/zero", O_RDWR);
    if(fd < 0) return 0;
  }
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if(p == (mchunkptr)-1) return 0;

  n_mmaps++;
  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;

  /* We demand that eight bytes into a page must be 8-byte aligned. */
  assert(aligned_OK(chunk2mem(p)));

  /* The offset to the start of the mmapped region is stored
   * in the prev_size field of the chunk; normally it is zero,
   * but that can be changed in memalign().
   */
  p->prev_size = 0;
  set_head(p, size|IS_MMAPPED);

  mmapped_mem += size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#if __STD_C
static void munmap_chunk(mchunkptr p)
#else
static void munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);
  int ret;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);

  n_mmaps--;
  mmapped_mem -= (size + p->prev_size);

  ret = munmap((char *)p - p->prev_size, size + p->prev_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}

#if HAVE_MREMAP

#if __STD_C
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
#else
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((size + offset) & (malloc_getpagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;

  cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);

  if (cp == (char *)-1) return 0;

  p = (mchunkptr)(cp + offset);

  assert(aligned_OK(chunk2mem(p)));

  assert((p->prev_size == offset));
  set_head(p, (new_size - offset)|IS_MMAPPED);

  mmapped_mem -= size + offset;
  mmapped_mem += new_size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#endif /* HAVE_MREMAP */

#endif /* HAVE_MMAP */

/*
  Extend the top-most chunk by obtaining memory from system.
  Main interface to sbrk (but see also malloc_trim).
*/

#if __STD_C
static void malloc_extend_top(INTERNAL_SIZE_T nb)
#else
static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
#endif
{
  char*     brk;                  /* return value from sbrk */
  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
  INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
  char*     new_brk;              /* return of 2nd sbrk call */
  INTERNAL_SIZE_T top_size;       /* new size of top chunk */

  mchunkptr old_top = top;        /* Record state of old top */
  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
  char*     old_end = (char*)(chunk_at_offset(old_top, old_top_size));

  /* Pad request with top_pad plus minimal overhead */

  INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;
  unsigned long pagesz = malloc_getpagesize;

  /* If not the first time through, round to preserve page boundary */
  /* Otherwise, we need to correct to a page size below anyway. */
  /* (We also correct below if an intervening foreign sbrk call.) */

  if (sbrk_base != (char*)(-1))
    sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);

  brk = (char*)(MORECORE (sbrk_size));

  /* Fail if sbrk failed or if a foreign sbrk call killed our space */
  if (brk == (char*)(MORECORE_FAILURE) ||
      (brk < old_end && old_top != initial_top))
    return;

  sbrked_mem += sbrk_size;

  if (brk == old_end) /* can just add bytes to current top */
  {
    top_size = sbrk_size + old_top_size;
    set_head(top, top_size | PREV_INUSE);
  }
  else
  {
    if (sbrk_base == (char*)(-1)) /* First time through. Record base */
      sbrk_base = brk;
    else  /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
      sbrked_mem += brk - (char*)old_end;

    /* Guarantee alignment of first new chunk made from this space */
    front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
    if (front_misalign > 0)
    {
      correction = (MALLOC_ALIGNMENT) - front_misalign;
      brk += correction;
    }
    else
      correction = 0;

    /* Guarantee the next brk will be at a page boundary */

    correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
		   ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));

    /* Allocate correction */
    new_brk = (char*)(MORECORE (correction));
    if (new_brk == (char*)(MORECORE_FAILURE)) return;

    sbrked_mem += correction;

    top = (mchunkptr)brk;
    top_size = new_brk - brk + correction;
    set_head(top, top_size | PREV_INUSE);

    if (old_top != initial_top)
    {

      /* There must have been an intervening foreign sbrk call. */
      /* A double fencepost is necessary to prevent consolidation */

      /* If not enough space to do this, then user did something very wrong */
      if (old_top_size < MINSIZE)
      {
	set_head(top, PREV_INUSE); /* will force null return from malloc */
	return;
      }

      /* Also keep size a multiple of MALLOC_ALIGNMENT */
      old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
      set_head_size(old_top, old_top_size);
      chunk_at_offset(old_top, old_top_size          )->size =
	SIZE_SZ|PREV_INUSE;
      chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
	SIZE_SZ|PREV_INUSE;
      /* If possible, release the rest. */
      if (old_top_size >= MINSIZE)
	fREe(chunk2mem(old_top));
    }
  }

  if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
    max_sbrked_mem = sbrked_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;

  /* We always land on a page boundary */
  assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
}



/* Main public routines */


/*
  Malloc Algorithm:

    The requested size is first converted into a usable form, `nb'.
    This currently means to add 4 bytes overhead plus possibly more to
    obtain 8-byte alignment and/or to obtain a size of at least
    MINSIZE (currently 16 bytes), the smallest allocatable size.
    (All fits are considered `exact' if they are within MINSIZE bytes.)

    From there, the first of the following steps to succeed is taken:

    1. The bin corresponding to the request size is scanned, and if
       a chunk of exactly the right size is found, it is taken.

    2. The most recently remaindered chunk is used if it is big
       enough.  This is a form of (roving) first fit, used only in
       the absence of exact fits. Runs of consecutive requests use
       the remainder of the chunk used for the previous such request
       whenever possible. This limited use of a first-fit style
       allocation strategy tends to give contiguous chunks
       coextensive lifetimes, which improves locality and can reduce
       fragmentation in the long run.

    3. Other bins are scanned in increasing size order, using a
       chunk big enough to fulfill the request, and splitting off
       any remainder.  This search is strictly by best-fit; i.e.,
       the smallest (with ties going to approximately the least
       recently used) chunk that fits is selected.

    4. If large enough, the chunk bordering the end of memory
       (`top') is split off. (This use of `top' is in accord with
       the best-fit search rule.  In effect, `top' is treated as
       larger (and thus less well fitting) than any other available
       chunk since it can be extended to be as large as necessary
       (up to system limitations).

    5. If the request size meets the mmap threshold and the
       system supports mmap, and there are few enough currently
       allocated mmapped regions, and a call to mmap succeeds,
       the request is allocated via direct memory mapping.

    6. Otherwise, the top of memory is extended by
       obtaining more space from the system (normally using sbrk,
       but definable to anything else via the MORECORE macro).
       Memory is gathered from the system (in system page-sized
       units) in a way that allows chunks obtained across different
       sbrk calls to be consolidated, but does not require
       contiguous memory. Thus, it should be safe to intersperse
       mallocs with other sbrk calls.

    All allocations are made from the `lowest' part of any found
    chunk. (The implementation invariant is that prev_inuse is
    always true of any allocated chunk; i.e., that each allocated
    chunk borders either a previously allocated and still in-use chunk,
    or the base of its memory arena.)

*/

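/*
 * Illustration (not from the original source): a minimal usage sketch.
 * mALLOc/fREe are the dlmalloc spellings of the public entry points;
 * in U-Boot they are normally reachable under the standard names via
 * the renaming in malloc.h (an assumption worth checking against that
 * header), so callers just write malloc()/free().
 */
#if 0	/* example only, not compiled */
static void example_alloc(void)
{
	char *buf = malloc(100);	/* steps 1..6 above pick the chunk */

	if (!buf)
		return;		/* allocation can fail; heap may be exhausted */
	memset(buf, 0, 100);
	free(buf);		/* consolidated per the free() rules below */
}
#endif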
#if __STD_C
Void_t* mALLOc(size_t bytes)
#else
Void_t* mALLOc(bytes) size_t bytes;
#endif
{
  mchunkptr victim;                  /* inspected/selected chunk */
  INTERNAL_SIZE_T victim_size;       /* its size */
  int       idx;                     /* index for bin traversal */
  mbinptr   bin;                     /* associated bin */
  mchunkptr remainder;               /* remainder from a split */
  long      remainder_size;          /* its size */
  int       remainder_index;         /* its bin index */
  unsigned long block;               /* block traverser bit */
  int       startidx;                /* first bin of a traversed block */
  mchunkptr fwd;                     /* misc temp for linking */
  mchunkptr bck;                     /* misc temp for linking */
  mbinptr q;                         /* misc temp */

  INTERNAL_SIZE_T nb;

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
		return malloc_simple(bytes);
#endif

	if (CONFIG_IS_ENABLED(UNIT_TEST) && malloc_testing) {
		if (--malloc_max_allocs < 0)
			return NULL;
	}

  /* check if mem_malloc_init() was run */
  if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) {
    /* not initialized yet */
    return NULL;
  }

  if ((long)bytes < 0) return NULL;

  nb = request2size(bytes);  /* padded request size; */

  /* Check for exact match in a bin */

  if (is_small_request(nb))  /* Faster version for small requests */
  {
    idx = smallbin_index(nb);

    /* No traversal or size check necessary for small bins.  */

    q = bin_at(idx);
    victim = last(q);

    /* Also scan the next one, since it would have a remainder < MINSIZE */
    if (victim == q)
    {
      q = next_bin(q);
      victim = last(q);
    }
    if (victim != q)
    {
      victim_size = chunksize(victim);
      unlink(victim, bck, fwd);
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
      return chunk2mem(victim);
    }

    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */

  }
  else
  {
    idx = bin_index(nb);
    bin = bin_at(idx);

    for (victim = last(bin); victim != bin; victim = victim->bk)
    {
      victim_size = chunksize(victim);
      remainder_size = victim_size - nb;

      if (remainder_size >= (long)MINSIZE) /* too big */
      {
	--idx; /* adjust to rescan below after checking last remainder */
	break;
      }

      else if (remainder_size >= 0) /* exact fit */
      {
	unlink(victim, bck, fwd);
	set_inuse_bit_at_offset(victim, victim_size);
	check_malloced_chunk(victim, nb);
	VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
	return chunk2mem(victim);
      }
    }

    ++idx;

  }

  /* Try to use the last split-off remainder */

  if ( (victim = last_remainder->fd) != last_remainder)
  {
    victim_size = chunksize(victim);
    remainder_size = victim_size - nb;

    if (remainder_size >= (long)MINSIZE) /* re-split */
    {
      remainder = chunk_at_offset(victim, nb);
      set_head(victim, nb | PREV_INUSE);
      link_last_remainder(remainder);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_foot(remainder, remainder_size);
      check_malloced_chunk(victim, nb);
      VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
      return chunk2mem(victim);
    }

    clear_last_remainder;

    if (remainder_size >= 0)  /* exhaust */
    {
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
      return chunk2mem(victim);
    }

    /* Else place in bin */

    frontlink(victim, victim_size, remainder_index, bck, fwd);
  }

  /*
     If there are any possibly nonempty big-enough blocks,
     search for best fitting chunk by scanning bins in blockwidth units.
  */

  if ( (block = idx2binblock(idx)) <= binblocks_r)
  {

    /* Get to the first marked block */

    if ( (block & binblocks_r) == 0)
    {
      /* force to an even block boundary */
      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
      block <<= 1;
      while ((block & binblocks_r) == 0)
      {
	idx += BINBLOCKWIDTH;
	block <<= 1;
      }
    }

    /* For each possibly nonempty block ... */
    for (;;)
    {
      startidx = idx;          /* (track incomplete blocks) */
      q = bin = bin_at(idx);

      /* For each bin in this block ... */
      do
      {
	/* Find and use first big enough chunk ... */

	for (victim = last(bin); victim != bin; victim = victim->bk)
	{
	  victim_size = chunksize(victim);
	  remainder_size = victim_size - nb;

	  if (remainder_size >= (long)MINSIZE) /* split */
	  {
	    remainder = chunk_at_offset(victim, nb);
	    set_head(victim, nb | PREV_INUSE);
	    unlink(victim, bck, fwd);
	    link_last_remainder(remainder);
	    set_head(remainder, remainder_size | PREV_INUSE);
	    set_foot(remainder, remainder_size);
	    check_malloced_chunk(victim, nb);
	    VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
	    return chunk2mem(victim);
	  }

	  else if (remainder_size >= 0)  /* take */
	  {
	    set_inuse_bit_at_offset(victim, victim_size);
	    unlink(victim, bck, fwd);
	    check_malloced_chunk(victim, nb);
	    VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
	    return chunk2mem(victim);
	  }

	}

	bin = next_bin(bin);

      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);

      /* Clear out the block bit. */

      do   /* Possibly backtrack to try to clear a partial block */
      {
	if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
	{
	  av_[1] = (mbinptr)(binblocks_r & ~block);
	  break;
	}
	--startidx;
	q = prev_bin(q);
      } while (first(q) == q);

      /* Get to the next possibly nonempty block */

      if ( (block <<= 1) <= binblocks_r && (block != 0) )
      {
	while ((block & binblocks_r) == 0)
	{
	  idx += BINBLOCKWIDTH;
	  block <<= 1;
	}
      }
      else
	break;
    }
  }


  /* Try to use top chunk */

  /* Require that there be a remainder, ensuring top always exists  */
  if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
  {

#if HAVE_MMAP
    /* If big and would otherwise need to extend, try to use mmap instead */
    if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
	(victim = mmap_chunk(nb))) {
      VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
      return chunk2mem(victim);
    }
#endif

    /* Try to extend */
    malloc_extend_top(nb);
    if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
      return NULL; /* propagate failure */
  }

  victim = top;
  set_head(victim, nb | PREV_INUSE);
  top = chunk_at_offset(victim, nb);
  set_head(top, remainder_size | PREV_INUSE);
  check_malloced_chunk(victim, nb);
  VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
  return chunk2mem(victim);

}



/*

  free() algorithm:

  cases:

    1. free(0) has no effect.

    2. If the chunk was allocated via mmap, it is released via munmap().

    3. If a returned chunk borders the current high end of memory,
       it is consolidated into the top, and if the total unused
       topmost memory exceeds the trim threshold, malloc_trim is
       called.

    4. Other chunks are consolidated as they arrive, and
       placed in corresponding bins. (This includes the case of
       consolidating with the current `last_remainder').

*/

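/*
 * Illustration (not from the original source): suppose three adjacent
 * chunks A | B | C, where A is free, B is being freed and C is free but
 * is not `top'.  Case 4 above then proceeds as:
 *
 *   1. B's PREV_INUSE bit is clear, so A is pulled out of its bin
 *      (unlink) and merged: p = A, sz = size(A) + size(B).
 *   2. The chunk after C says C is free, so C is unlinked too and
 *      sz += size(C).
 *   3. The merged chunk gets a fresh head and foot (set_head/set_foot)
 *      and is placed in the bin for its new size (frontlink).
 *
 * Had C been `top', case 3 would apply instead and the merged chunk
 * would simply become the new top.
 */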
#if __STD_C
void fREe(Void_t* mem)
#else
void fREe(mem) Void_t* mem;
#endif
{
  mchunkptr p;         /* chunk corresponding to mem */
  INTERNAL_SIZE_T hd;  /* its head field */
  INTERNAL_SIZE_T sz;  /* its size */
  int       idx;       /* its bin index */
  mchunkptr next;      /* next contiguous chunk */
  INTERNAL_SIZE_T nextsz; /* its size */
  INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
  mchunkptr bck;       /* misc temp for linking */
  mchunkptr fwd;       /* misc temp for linking */
  int       islr;      /* track whether merging with last_remainder */

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	/* free() is a no-op - all the memory will be freed on relocation */
	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
		VALGRIND_FREELIKE_BLOCK(mem, SIZE_SZ);
		return;
	}
#endif

  if (mem == NULL)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);
  hd = p->size;

#if HAVE_MMAP
  if (hd & IS_MMAPPED)                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  check_inuse_chunk(p);

  sz = hd & ~PREV_INUSE;
  next = chunk_at_offset(p, sz);
  nextsz = chunksize(next);
  VALGRIND_FREELIKE_BLOCK(mem, SIZE_SZ);

  if (next == top)                            /* merge with top */
  {
    sz += nextsz;

    if (!(hd & PREV_INUSE))                    /* consolidate backward */
    {
      prevsz = p->prev_size;
      p = chunk_at_offset(p, -((long) prevsz));
      sz += prevsz;
      unlink(p, bck, fwd);
    }

    set_head(p, sz | PREV_INUSE);
    top = p;
    if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
      malloc_trim(top_pad);
    return;
  }

  set_head(next, nextsz);                    /* clear inuse bit */

  islr = 0;

  if (!(hd & PREV_INUSE))                    /* consolidate backward */
  {
    prevsz = p->prev_size;
    p = chunk_at_offset(p, -((long) prevsz));
    sz += prevsz;

    if (p->fd == last_remainder)             /* keep as last_remainder */
      islr = 1;
    else
      unlink(p, bck, fwd);
  }

  if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
  {
    sz += nextsz;

    if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
    {
      islr = 1;
      link_last_remainder(p);
    }
    else
      unlink(next, bck, fwd);
  }


  set_head(p, sz | PREV_INUSE);
  set_foot(p, sz);
  if (!islr)
    frontlink(p, sz, idx, bck, fwd);
}



/*

  Realloc algorithm:

    Chunks that were obtained via mmap cannot be extended or shrunk
    unless HAVE_MREMAP is defined, in which case mremap is used.
    Otherwise, if their reallocation is for additional space, they are
    copied.  If for less, they are just left alone.

    Otherwise, if the reallocation is for additional space, and the
    chunk can be extended, it is, else a malloc-copy-free sequence is
    taken.  There are several different ways that a chunk could be
    extended. All are tried:

       * Extending forward into following adjacent free chunk.
       * Shifting backwards, joining preceding adjacent space
       * Both shifting backwards and extending forward.
       * Extending into newly sbrked space

    Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
    size argument of zero (re)allocates a minimum-sized chunk.

    If the reallocation is for less space, and the new request is for
    a `small' (<512 bytes) size, then the newly unused space is lopped
    off and freed.

    The old unix realloc convention of allowing the last-free'd chunk
    to be used as an argument to realloc is no longer supported.
    I don't know of any programs still relying on this feature,
    and allowing it would also allow too many other incorrect
    usages of realloc to be sensible.


*/
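
/*
  Illustrative sketch only (kept out of the build with #if 0): since
  rEALLOc returns NULL on failure while leaving the old block intact,
  callers should keep the old pointer until success is known. The
  helper grow_buffer() is hypothetical.
*/
#if 0
static int grow_buffer(char **buf, size_t new_len)
{
  char *tmp = rEALLOc(*buf, new_len);  /* NULL *buf acts like malloc */

  if (tmp == NULL)
    return -1;        /* *buf is still valid and unchanged */
  *buf = tmp;
  return 0;
}
#endif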


#if __STD_C
Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
#else
Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
#endif
{
  INTERNAL_SIZE_T    nb;      /* padded request size */

  mchunkptr oldp;             /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T    oldsize; /* its size */

  mchunkptr newp;             /* chunk to return */
  INTERNAL_SIZE_T    newsize; /* its size */
  Void_t*   newmem;           /* corresponding user mem */

  mchunkptr next;             /* next contiguous chunk after oldp */
  INTERNAL_SIZE_T  nextsize;  /* its size */

  mchunkptr prev;             /* previous contiguous chunk before oldp */
  INTERNAL_SIZE_T  prevsize;  /* its size */

  mchunkptr remainder;        /* holds split off extra space from newp */
  INTERNAL_SIZE_T  remainder_size;  /* its size */

  mchunkptr bck;              /* misc temp for linking */
  mchunkptr fwd;              /* misc temp for linking */

#ifdef REALLOC_ZERO_BYTES_FREES
	if (!bytes) {
		fREe(oldmem);
		return NULL;
	}
#endif

  if ((long)bytes < 0) return NULL;

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == NULL) return mALLOc(bytes);

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
		/* This is harder to support and should not be needed */
		panic("pre-reloc realloc() is not supported");
	}
#endif

  newp    = oldp    = mem2chunk(oldmem);
  newsize = oldsize = chunksize(oldp);


  nb = request2size(bytes);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp))
  {
#if HAVE_MREMAP
    newp = mremap_chunk(oldp, nb);
    if(newp) return chunk2mem(newp);
#endif
    /* Note the extra SIZE_SZ overhead. */
    if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
    /* Must alloc, copy, free. */
    newmem = mALLOc(bytes);
    if (!newmem)
      return NULL; /* propagate failure */
    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
    munmap_chunk(oldp);
    return newmem;
  }
#endif

  check_inuse_chunk(oldp);

  if ((long)(oldsize) < (long)(nb))
  {

    /* Try expanding forward */

    next = chunk_at_offset(oldp, oldsize);
    if (next == top || !inuse(next))
    {
      nextsize = chunksize(next);

      /* Forward into top only if a remainder */
      if (next == top)
      {
        if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
        {
          newsize += nextsize;
          top = chunk_at_offset(oldp, nb);
          set_head(top, (newsize - nb) | PREV_INUSE);
          set_head_size(oldp, nb);
          VALGRIND_RESIZEINPLACE_BLOCK(chunk2mem(oldp), 0, bytes, SIZE_SZ);
          VALGRIND_MAKE_MEM_DEFINED(chunk2mem(oldp), bytes);
          return chunk2mem(oldp);
        }
      }

      /* Forward into next chunk */
      else if (((long)(nextsize + newsize) >= (long)(nb)))
      {
        unlink(next, bck, fwd);
        newsize  += nextsize;
        VALGRIND_RESIZEINPLACE_BLOCK(chunk2mem(oldp), 0, bytes, SIZE_SZ);
        VALGRIND_MAKE_MEM_DEFINED(chunk2mem(oldp), bytes);
        goto split;
      }
    }
    else
    {
      next = NULL;
      nextsize = 0;
    }

    /* Try shifting backwards. */

    if (!prev_inuse(oldp))
    {
      prev = prev_chunk(oldp);
      prevsize = chunksize(prev);

      /* try forward + backward first to save a later consolidation */

      if (next != NULL)
      {
        /* into top */
        if (next == top)
        {
          if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
          {
            unlink(prev, bck, fwd);
            newp = prev;
            newsize += prevsize + nextsize;
            newmem = chunk2mem(newp);
            VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
            MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
            top = chunk_at_offset(newp, nb);
            set_head(top, (newsize - nb) | PREV_INUSE);
            set_head_size(newp, nb);
            VALGRIND_FREELIKE_BLOCK(oldmem, SIZE_SZ);
            return newmem;
          }
        }

        /* into next chunk */
        else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
        {
          unlink(next, bck, fwd);
          unlink(prev, bck, fwd);
          newp = prev;
          newsize += nextsize + prevsize;
          newmem = chunk2mem(newp);
          VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
          MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
          goto split;
        }
      }

      /* backward only */
      if (prev != NULL && (long)(prevsize + newsize) >= (long)nb)
      {
        unlink(prev, bck, fwd);
        newp = prev;
        newsize += prevsize;
        newmem = chunk2mem(newp);
        VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
        MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
        goto split;
      }
    }

    /* Must allocate */

    newmem = mALLOc (bytes);

    if (newmem == NULL)  /* propagate failure */
      return NULL;

    /* Avoid copy if newp is next chunk after oldp. */
    /* (This can only happen when new chunk is sbrk'ed.) */

    if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
    {
      newsize += chunksize(newp);
      newp = oldp;
      goto split;
    }

    /* Otherwise copy, free, and exit */
    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
    fREe(oldmem);
    return newmem;
  } else {
    VALGRIND_RESIZEINPLACE_BLOCK(oldmem, 0, bytes, SIZE_SZ);
    VALGRIND_MAKE_MEM_DEFINED(oldmem, bytes);
  }


 split:  /* split off extra room in old or expanded chunk */

  if (newsize - nb >= MINSIZE) /* split off remainder */
  {
    remainder = chunk_at_offset(newp, nb);
    remainder_size = newsize - nb;
    set_head_size(newp, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_inuse_bit_at_offset(remainder, remainder_size);
    VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ,
                              false);
    fREe(chunk2mem(remainder)); /* let free() deal with it */
  }
  else
  {
    set_head_size(newp, newsize);
    set_inuse_bit_at_offset(newp, newsize);
  }

  check_inuse_chunk(newp);
  return chunk2mem(newp);
}




/*

  memalign algorithm:

    memalign requests more than enough space from malloc, finds a spot
    within that chunk that meets the alignment request, and then
    possibly frees the leading and trailing space.

    The alignment argument must be a power of two. This property is not
    checked by memalign, so misuse may result in random runtime errors.

    8-byte alignment is guaranteed by normal malloc calls, so don't
    bother calling memalign with an argument of 8 or less.

    Overreliance on memalign is a sure way to fragment space.

*/
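
/*
  Illustrative sketch only (kept out of the build with #if 0): asking
  for a cache-line-aligned buffer. The 64-byte figure is just an
  example; whatever alignment is passed must be a power of two, as
  noted above.
*/
#if 0
static Void_t *alloc_aligned_example(size_t len)
{
  Void_t *p = mEMALIGn(64, len);   /* 64 is a power of two */

  if (p)
    assert(((unsigned long)p % 64) == 0);
  return p;
}
#endif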


#if __STD_C
Void_t* mEMALIGn(size_t alignment, size_t bytes)
#else
Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
#endif
{
  INTERNAL_SIZE_T    nb;      /* padded request size */
  char*     m;                /* memory returned by malloc call */
  mchunkptr p;                /* corresponding chunk */
  char*     brk;              /* alignment point within p */
  mchunkptr newp;             /* chunk to return */
  INTERNAL_SIZE_T  newsize;   /* its size */
  INTERNAL_SIZE_T  leadsize;  /* leading space before alignment point */
  mchunkptr remainder;        /* spare room at end to split off */
  long      remainder_size;   /* its size */

  if ((long)bytes < 0) return NULL;

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
		return memalign_simple(alignment, bytes);
	}
#endif

  /* If need less alignment than we give anyway, just relay to malloc */

  if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */

  if (alignment <  MINSIZE) alignment = MINSIZE;

  /* Call malloc with worst case padding to hit alignment. */

  nb = request2size(bytes);
  m  = (char*)(mALLOc(nb + alignment + MINSIZE));

	/*
	 * The attempt to over-allocate (with a size large enough to guarantee the
	 * ability to find an aligned region within allocated memory) failed.
	 *
	 * Try again, this time only allocating exactly the size the user wants. If
	 * the allocation now succeeds and just happens to be aligned, we can still
	 * fulfill the user's request.
	 */
	if (m == NULL) {
		size_t extra, extra2;
		/*
		 * Use bytes not nb, since mALLOc internally calls request2size too, and
		 * each call increases the size to allocate, to account for the header.
		 */
		m = (char*)(mALLOc(bytes));
		/* Aligned -> return it */
		if ((((unsigned long)(m)) % alignment) == 0)
			return m;
		/*
		 * Otherwise, try again, requesting enough extra space to be able to
		 * acquire alignment.
		 */
		fREe(m);
		/* Add in extra bytes to match misalignment of unexpanded allocation */
		extra = alignment - (((unsigned long)(m)) % alignment);
		m = (char*)(mALLOc(bytes + extra));
		/*
		 * m might not be the same as before. Validate that the previous value of
		 * extra still works for the current value of m.
		 * If (!m), extra2 would compute as alignment, so skip the check and
		 * let the NULL test below handle the failed allocation.
		 */
		if (m) {
			extra2 = alignment - (((unsigned long)(m)) % alignment);
			if (extra2 > extra) {
				fREe(m);
				m = NULL;
			}
		}
		/* Fall through to original NULL check and chunk splitting logic */
	}

  if (m == NULL) return NULL; /* propagate failure */

  p = mem2chunk(m);

  if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
  {
#if HAVE_MMAP
    if(chunk_is_mmapped(p))
      return chunk2mem(p); /* nothing more to do */
#endif
  }
  else /* misaligned */
  {
    /*
      Find an aligned spot inside chunk.
      Since we need to give back leading space in a chunk of at
      least MINSIZE, if the first calculation places us at
      a spot with less than MINSIZE leader, we can move to the
      next aligned spot -- we've allocated enough total room so that
      this is always possible.
    */

    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment));
    if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;

    newp = (mchunkptr)brk;
    leadsize = brk - (char*)(p);
    newsize = chunksize(p) - leadsize;

#if HAVE_MMAP
    if(chunk_is_mmapped(p))
    {
      newp->prev_size = p->prev_size + leadsize;
      set_head(newp, newsize|IS_MMAPPED);
      return chunk2mem(newp);
    }
#endif

    /* give back leader, use the rest */

    set_head(newp, newsize | PREV_INUSE);
    set_inuse_bit_at_offset(newp, newsize);
    set_head_size(p, leadsize);
    fREe(chunk2mem(p));
    p = newp;
    VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(p), bytes, SIZE_SZ, false);

    assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
  }

  /* Also give back spare room at the end */

  remainder_size = chunksize(p) - nb;

  if (remainder_size >= (long)MINSIZE)
  {
    remainder = chunk_at_offset(p, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_head_size(p, nb);
    VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ,
                              false);
    fREe(chunk2mem(remainder));
  }

  check_inuse_chunk(p);
  return chunk2mem(p);

}




/*
  valloc just invokes memalign with alignment argument equal
  to the page size of the system (or as near to this as can
  be figured out from all the includes/defines above.)
*/

#if __STD_C
Void_t* vALLOc(size_t bytes)
#else
Void_t* vALLOc(bytes) size_t bytes;
#endif
{
  return mEMALIGn (malloc_getpagesize, bytes);
}

/*
  pvalloc just invokes valloc for the nearest pagesize
  that will accommodate request
*/


#if __STD_C
Void_t* pvALLOc(size_t bytes)
#else
Void_t* pvALLOc(bytes) size_t bytes;
#endif
{
  size_t pagesize = malloc_getpagesize;
  return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
}
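
/*
  Worked example of the rounding above: with pagesize = 4096, a request
  of 5000 bytes becomes (5000 + 4095) & ~4095 = 8192, i.e. two pages.
*/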

/*

  calloc calls malloc, then zeroes out the allocated chunk.

*/
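
/*
  Illustrative sketch only (kept out of the build with #if 0): the
  caller-visible contract is that every byte of the returned region
  reads as zero. Note that the n * elem_size product computed below is
  not checked for overflow, so callers must ensure it cannot wrap.
*/
#if 0
static void calloc_example(void)
{
  unsigned char *p = cALLOc(16, sizeof(*p));
  size_t i;

  if (p) {
    for (i = 0; i < 16; i++)
      assert(p[i] == 0);
    fREe(p);
  }
}
#endif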

#if __STD_C
Void_t* cALLOc(size_t n, size_t elem_size)
#else
Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T csz;

  INTERNAL_SIZE_T sz = n * elem_size;


  /* check if expand_top called, in which case don't need to clear */
#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
#if MORECORE_CLEARS
  mchunkptr oldtop = top;
  INTERNAL_SIZE_T oldtopsize = chunksize(top);
#endif
#endif
  Void_t* mem = mALLOc (sz);

  if ((long)n < 0) return NULL;

  if (mem == NULL)
    return NULL;
  else
  {
#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
		memset(mem, 0, sz);
		return mem;
	}
#endif
    p = mem2chunk(mem);

    /* Two optional cases in which clearing not necessary */


#if HAVE_MMAP
    if (chunk_is_mmapped(p)) return mem;
#endif

    csz = chunksize(p);

#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
#if MORECORE_CLEARS
    if (p == oldtop && csz > oldtopsize)
    {
      /* clear only the bytes from non-freshly-sbrked memory */
      csz = oldtopsize;
    }
#endif
#endif

    MALLOC_ZERO(mem, csz - SIZE_SZ);
    VALGRIND_MAKE_MEM_DEFINED(mem, sz);
    return mem;
  }
}

/*

  cfree just calls free. It is needed/defined on some systems
  that pair it with calloc, presumably for odd historical reasons.

*/

#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
#if __STD_C
void cfree(Void_t *mem)
#else
void cfree(mem) Void_t *mem;
#endif
{
  fREe(mem);
}
#endif



/*

    Malloc_trim gives memory back to the system (via negative
    arguments to sbrk) if there is unused memory at the `high' end of
    the malloc pool. You can call this after freeing large blocks of
    memory to potentially reduce the system-level memory requirements
    of a program. However, it cannot guarantee to reduce memory. Under
    some allocation patterns, some large free blocks of memory will be
    locked between two used chunks, so they cannot be given back to
    the system.

    The `pad' argument to malloc_trim represents the amount of free
    trailing space to leave untrimmed. If this argument is zero,
    only the minimum amount of memory to maintain internal data
    structures will be left (one page or less). Non-zero arguments
    can be supplied to maintain enough trailing space to service
    future expected allocations without having to re-obtain memory
    from the system.

    Malloc_trim returns 1 if it actually released any memory, else 0.

*/
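
/*
  Illustrative sketch only (kept out of the build with #if 0): a
  typical call site, after large blocks have been freed. A pad of zero
  trims as much as possible; the return value only reports whether
  anything was actually given back.
*/
#if 0
static void release_unused_memory(void)
{
  int released = malloc_trim(0);  /* 1 if memory was returned, else 0 */

  (void)released;
}
#endif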

#if __STD_C
int malloc_trim(size_t pad)
#else
int malloc_trim(pad) size_t pad;
#endif
{
  long  top_size;        /* Amount of top-most memory */
  long  extra;           /* Amount to release */
  char* current_brk;     /* address returned by pre-check sbrk call */
  char* new_brk;         /* address returned by negative sbrk call */

  unsigned long pagesz = malloc_getpagesize;

  top_size = chunksize(top);
  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;

  if (extra < (long)pagesz)  /* Not enough memory to release */
    return 0;

  else
  {
    /* Test to make sure no one else called sbrk */
    current_brk = (char*)(MORECORE (0));
    if (current_brk != (char*)(top) + top_size)
      return 0;     /* Apparently we don't own memory; must fail */

    else
    {
      new_brk = (char*)(MORECORE (-extra));

      if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
      {
        /* Try to figure out what we have */
        current_brk = (char*)(MORECORE (0));
        top_size = current_brk - (char*)top;
        if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
        {
          sbrked_mem = current_brk - sbrk_base;
          set_head(top, top_size | PREV_INUSE);
        }
        check_chunk(top);
        return 0;
      }

      else
      {
        /* Success. Adjust top accordingly. */
        set_head(top, (top_size - extra) | PREV_INUSE);
        sbrked_mem -= extra;
        check_chunk(top);
        return 1;
      }
    }
  }
}



/*
  malloc_usable_size:

    This routine tells you how many bytes you can actually use in an
    allocated chunk, which may be more than you requested (although
    often not). You can use this many bytes without worrying about
    overwriting other allocated objects. Not a particularly great
    programming practice, but still sometimes useful.

*/
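
/*
  Illustrative sketch only (kept out of the build with #if 0): the
  usable size is at least the requested size, so the tail bytes may be
  used without growing the allocation.
*/
#if 0
static void usable_size_example(void)
{
  char *p = mALLOc(10);

  if (p) {
    size_t usable = malloc_usable_size(p);

    assert(usable >= 10);
    memset(p, 0, usable);  /* safe: the whole usable span belongs to p */
    fREe(p);
  }
}
#endif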

#if __STD_C
size_t malloc_usable_size(Void_t* mem)
#else
size_t malloc_usable_size(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  if (mem == NULL)
    return 0;
  else
  {
    p = mem2chunk(mem);
    if(!chunk_is_mmapped(p))
    {
      if (!inuse(p)) return 0;
      check_inuse_chunk(p);
      return chunksize(p) - SIZE_SZ;
    }
    return chunksize(p) - 2*SIZE_SZ;
  }
}




/* Utility to update current_mallinfo for malloc_stats and mallinfo() */

#ifdef DEBUG
static void malloc_update_mallinfo()
{
  int i;
  mbinptr b;
  mchunkptr p;
#ifdef DEBUG
  mchunkptr q;
#endif

  INTERNAL_SIZE_T avail = chunksize(top);
  int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;

  for (i = 1; i < NAV; ++i)
  {
    b = bin_at(i);
    for (p = last(b); p != b; p = p->bk)
    {
#ifdef DEBUG
      check_free_chunk(p);
      for (q = next_chunk(p);
           q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
           q = next_chunk(q))
        check_inuse_chunk(q);
#endif
      avail += chunksize(p);
      navail++;
    }
  }

  current_mallinfo.ordblks = navail;
  current_mallinfo.uordblks = sbrked_mem - avail;
  current_mallinfo.fordblks = avail;
  current_mallinfo.hblks = n_mmaps;
  current_mallinfo.hblkhd = mmapped_mem;
  current_mallinfo.keepcost = chunksize(top);

}
#endif /* DEBUG */



/*

  malloc_stats:

    Prints the amount of space obtained from the system (both
    via sbrk and mmap), the maximum amount (which may be more than
    current if malloc_trim and/or munmap got called), the maximum
    number of simultaneous mmap regions used, and the current number
    of bytes allocated via malloc (or realloc, etc) but not yet
    freed. (Note that this is the number of bytes allocated, not the
    number requested. It will be larger than the number requested
    because of alignment and bookkeeping overhead.)

*/

#ifdef DEBUG
void malloc_stats()
{
  malloc_update_mallinfo();
  printf("max system bytes = %10u\n",
         (unsigned int)(max_total_mem));
  printf("system bytes     = %10u\n",
         (unsigned int)(sbrked_mem + mmapped_mem));
  printf("in use bytes     = %10u\n",
         (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
#if HAVE_MMAP
  printf("max mmap regions = %10u\n",
         (unsigned int)max_n_mmaps);
#endif
}
#endif /* DEBUG */

/*
  mallinfo returns a copy of updated current mallinfo.
*/

#ifdef DEBUG
struct mallinfo mALLINFo()
{
  malloc_update_mallinfo();
  return current_mallinfo;
}
#endif /* DEBUG */




/*
  mallopt:

    mallopt is the general SVID/XPG interface to tunable parameters.
    The format is to provide a (parameter-number, parameter-value) pair.
    mallopt then sets the corresponding parameter to the argument
    value if it can (i.e., so long as the value is meaningful),
    and returns 1 if successful else 0.

    See descriptions of tunable parameters above.

*/
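
/*
  Illustrative sketch only (kept out of the build with #if 0): raising
  the mmap threshold so allocations below 256 KiB are served from the
  sbrk pool. The value shown is just an example.
*/
#if 0
static void tune_allocator_example(void)
{
  int ok = mALLOPt(M_MMAP_THRESHOLD, 256 * 1024);

  assert(ok == 1);  /* mALLOPt returns 1 on success, 0 otherwise */
}
#endif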

#if __STD_C
int mALLOPt(int param_number, int value)
#else
int mALLOPt(param_number, value) int param_number; int value;
#endif
{
  switch(param_number)
  {
    case M_TRIM_THRESHOLD:
      trim_threshold = value; return 1;
    case M_TOP_PAD:
      top_pad = value; return 1;
    case M_MMAP_THRESHOLD:
      mmap_threshold = value; return 1;
    case M_MMAP_MAX:
#if HAVE_MMAP
      n_mmaps_max = value; return 1;
#else
      if (value != 0) return 0; else  n_mmaps_max = value; return 1;
#endif

    default:
      return 0;
  }
}

int initf_malloc(void)
{
#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	assert(gd->malloc_base);	/* Set up by crt0.S */
	gd->malloc_limit = CONFIG_VAL(SYS_MALLOC_F_LEN);
	gd->malloc_ptr = 0;
#endif

	return 0;
}

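/*
 * Test hooks: malloc_enable_testing() arms a counter (malloc_max_allocs)
 * that the allocator consults so that, after the given number of further
 * allocations, mALLOc() can be made to fail deterministically; unit tests
 * use this to exercise out-of-memory paths. malloc_disable_testing()
 * turns the behaviour back off.
 */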
void malloc_enable_testing(int max_allocs)
{
	malloc_testing = true;
	malloc_max_allocs = max_allocs;
}

void malloc_disable_testing(void)
{
	malloc_testing = false;
}

/*

History:

    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
      * return null for negative arguments
      * Added several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com>
         * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
           (e.g. WIN32 platforms)
         * Clean up header file inclusion for WIN32 platforms
         * Clean up code to avoid Microsoft Visual C++ compiler complaints
         * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
           memory allocation routines
         * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
         * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
           usage of 'assert' in non-WIN32 code
         * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
           avoid infinite loop
      * Always call 'fREe()' rather than 'free()'

    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
      * Fixed ordering problem with boundary-stamping

    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
      * Added pvalloc, as recommended by H.J. Liu
      * Added 64bit pointer support mainly from Wolfram Gloger
      * Added anonymously donated WIN32 sbrk emulation
      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
      * malloc_extend_top: fix mask error that caused wastage after
        foreign sbrks
      * Add linux mremap support code from HJ Liu

    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
      * Integrated most documentation with the code.
      * Add support for mmap, with help from
        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Use last_remainder in more cases.
      * Pack bins using idea from colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
      * Eliminate block-local decls to simplify tracing and debugging.
      * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
      * Rely on page size for units instead of SBRK_UNIT to
        avoid surprises about sbrk alignment conventions.
      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
        (raymond@es.ele.tue.nl) for the suggestion.
      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
      * More precautions for cases where other routines call sbrk,
        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Added macros etc., allowing use in linux libc from
        H.J. Lu (hjl@gnu.ai.mit.edu)
      * Inverted this history list

    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
      * Removed all preallocation code since under current scheme
        the work required to undo bad preallocations exceeds
        the work saved in good cases for most test programs.
      * No longer use return list or unconsolidated bins since
        no scheme using them consistently outperforms those that don't
        given above changes.
      * Use best fit for very large chunks to prevent some worst-cases.
      * Added some support for debugging

    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
      * Removed footers when chunks are in use. Thanks to
        Paul Wilson (wilson@cs.texas.edu) for the suggestion.

    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
      * Added malloc_trim, with help from Wolfram Gloger
        (wmglo@Dent.MED.Uni-Muenchen.DE).

    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)

    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
      * realloc: try to expand in both directions
      * malloc: swap order of clean-bin strategy;
      * realloc: only conditionally expand backwards
      * Try not to scavenge used bins
      * Use bin counts as a guide to preallocation
      * Occasionally bin return list chunks in first scan
      * Add a few optimizations from colin@nyx10.cs.du.edu

    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
      * faster bin computation & slightly different binning
      * merged all consolidations to one part of malloc proper
        (eliminating old malloc_find_space & malloc_clean_bin)
      * Scan 2 returns chunks (not just 1)
      * Propagate failure in realloc if malloc returns 0
      * Add stuff to allow compilation on non-ANSI compilers
        from kpv@research.att.com

    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
      * removed potential for odd address access in prev_chunk
      * removed dependency on getpagesize.h
      * misc cosmetics and a bit more internal documentation
      * anticosmetics: mangled names in macros to evade debugger strangeness
      * tested on sparc, hp-700, dec-mips, rs6000
        with gcc & native cc (hp, dec only) allowing
        Detlefs & Zorn comparison study (in SIGPLAN Notices.)

    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
        structure of old version, but most details differ.)

*/