// SPDX-License-Identifier: GPL-2.0+
/*
 * This code is based on a version (aka dlmalloc) of malloc/free/realloc written
 * by Doug Lea and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/-
 *
 * The original code is available at http://gee.cs.oswego.edu/pub/misc/
 * as file malloc-2.6.6.c.
 */

#if CONFIG_IS_ENABLED(UNIT_TEST)
#define DEBUG
#endif

#include <common.h>
#include <log.h>
#include <asm/global_data.h>

#include <malloc.h>
#include <asm/io.h>
#include <valgrind/memcheck.h>

#ifdef DEBUG
#if __STD_C
static void malloc_update_mallinfo (void);
void malloc_stats (void);
#else
static void malloc_update_mallinfo ();
void malloc_stats();
#endif
#endif	/* DEBUG */

DECLARE_GLOBAL_DATA_PTR;

#ifdef MCHECK_HEAP_PROTECTION
 #define STATIC_IF_MCHECK static
 #undef MALLOC_COPY
 #undef MALLOC_ZERO
static inline void MALLOC_ZERO(void *p, size_t sz) { memset(p, 0, sz); }
static inline void MALLOC_COPY(void *dest, const void *src, size_t sz) { memcpy(dest, src, sz); }
#else
 #define STATIC_IF_MCHECK
 #define mALLOc_impl mALLOc
 #define fREe_impl fREe
 #define rEALLOc_impl rEALLOc
 #define mEMALIGn_impl mEMALIGn
 #define cALLOc_impl cALLOc
#endif

/*
  Emulation of sbrk for WIN32
  All code within the ifdef WIN32 is untested by me.

  Thanks to Martin Fong and others for supplying this.
*/


#ifdef WIN32

#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
~(malloc_getpagesize-1))
#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1))

/* reserve 64MB to ensure large contiguous space */
#define RESERVED_SIZE (1024*1024*64)
#define NEXT_SIZE (2048*1024)
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)

struct GmListElement;
typedef struct GmListElement GmListElement;

struct GmListElement
{
	GmListElement* next;
	void* base;
};

static GmListElement* head = 0;
static unsigned int gNextAddress = 0;
static unsigned int gAddressBase = 0;
static unsigned int gAllocatedSize = 0;

static
GmListElement* makeGmListElement (void* bas)
{
	GmListElement* this;
	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
	assert (this);
	if (this)
	{
		this->base = bas;
		this->next = head;
		head = this;
	}
	return this;
}

void gcleanup (void)
{
	BOOL rval;
	assert ( (head == NULL) || (head->base == (void*)gAddressBase));
	if (gAddressBase && (gNextAddress - gAddressBase))
	{
		rval = VirtualFree ((void*)gAddressBase,
				    gNextAddress - gAddressBase,
				    MEM_DECOMMIT);
		assert (rval);
	}
	while (head)
	{
		GmListElement* next = head->next;
		rval = VirtualFree (head->base, 0, MEM_RELEASE);
		assert (rval);
		LocalFree (head);
		head = next;
	}
}

static
void* findRegion (void* start_address, unsigned long size)
{
	MEMORY_BASIC_INFORMATION info;
	if (size >= TOP_MEMORY) return NULL;

	while ((unsigned long)start_address + size < TOP_MEMORY)
	{
		VirtualQuery (start_address, &info, sizeof (info));
		if ((info.State == MEM_FREE) && (info.RegionSize >= size))
			return start_address;
		else
		{
			/* Requested region is not available so see if the */
			/* next region is available.  Set 'start_address' */
			/* to the next region and call 'VirtualQuery()' */
			/* again. */

			start_address = (char*)info.BaseAddress + info.RegionSize;

			/* Make sure we start looking for the next region */
			/* on the *next* 64K boundary.  Otherwise, even if */
			/* the new region is free according to */
			/* 'VirtualQuery()', the subsequent call to */
			/* 'VirtualAlloc()' (which follows the call to */
			/* this routine in 'wsbrk()') will round *down* */
			/* the requested address to a 64K boundary which */
			/* we already know is an address in the */
			/* unavailable region.  Thus, the subsequent call */
			/* to 'VirtualAlloc()' will fail and bring us back */
			/* here, causing us to go into an infinite loop. */

			start_address =
				(void *) AlignPage64K((unsigned long) start_address);
		}
	}
	return NULL;

}


void* wsbrk (long size)
{
	void* tmp;
	if (size > 0)
	{
		if (gAddressBase == 0)
		{
			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
			gNextAddress = gAddressBase =
				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
							    MEM_RESERVE, PAGE_NOACCESS);
		} else if (AlignPage (gNextAddress + size) >
			   (gAddressBase + gAllocatedSize))
		{
			long new_size = max (NEXT_SIZE, AlignPage (size));
			void* new_address = (void*)(gAddressBase+gAllocatedSize);
			do
			{
				new_address = findRegion (new_address, new_size);

				if (!new_address)
					return (void*)-1;

				gAddressBase = gNextAddress =
					(unsigned int)VirtualAlloc (new_address, new_size,
								    MEM_RESERVE, PAGE_NOACCESS);
				/* repeat in case of race condition */
				/* The region that we found has been snagged */
				/* by another thread */
			}
			while (gAddressBase == 0);

			assert (new_address == (void*)gAddressBase);

			gAllocatedSize = new_size;

			if (!makeGmListElement ((void*)gAddressBase))
				return (void*)-1;
		}
		if ((size + gNextAddress) > AlignPage (gNextAddress))
		{
			void* res;
			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
					    (size + gNextAddress -
					     AlignPage (gNextAddress)),
					    MEM_COMMIT, PAGE_READWRITE);
			if (!res)
				return (void*)-1;
		}
		tmp = (void*)gNextAddress;
		gNextAddress = (unsigned int)tmp + size;
		return tmp;
	}
	else if (size < 0)
	{
		unsigned int alignedGoal = AlignPage (gNextAddress + size);
		/* Trim by releasing the virtual memory */
		if (alignedGoal >= gAddressBase)
		{
			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
				     MEM_DECOMMIT);
			gNextAddress = gNextAddress + size;
			return (void*)gNextAddress;
		}
		else
		{
			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
				     MEM_DECOMMIT);
			gNextAddress = gAddressBase;
			return (void*)-1;
		}
	}
	else
	{
		return (void*)gNextAddress;
	}
}

#endif


/*
  Type declarations
*/


struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
} __attribute__((__may_alias__)) ;

typedef struct malloc_chunk* mchunkptr;

/*

   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of previous chunk, if allocated            | |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             User data starts here...                          .
	    .                                                               .
	    .             (malloc_usable_space() bytes)                     .
	    .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of chunk                                     |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of previous chunk                            |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Forward pointer to next chunk in list             |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Back pointer to previous chunk in list            |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Unused space (may be 0 bytes long)                .
	    .                                                               .
	    .                                                               |

nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    (The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.)

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. (This makes it easier to
    deal with alignments etc).

    The two exceptions to all this are

     1. The special chunk `top', which doesn't bother using the
	trailing size field since there is no
	next contiguous chunk that would have to index off it. (After
	initialization, `top' is forced to always exist.  If it would
	become less than MINSIZE bytes long, it is replenished via
	malloc_extend_top.)

     2. Chunks allocated via mmap, which have the second-lowest-order
	bit (IS_MMAPPED) set in their size fields.  Because they are
	never merged or traversed from any other chunk, they have no
	foot size or inuse information.

    Available chunks are kept in any of several places (all declared below):

    * `av': An array of chunks serving as bin headers for consolidated
       chunks. Each bin is doubly linked.  The bins are approximately
       proportionally (log) spaced.  There are a lot of these bins
       (128). This may look excessive, but works very well in
       practice.  All procedures maintain the invariant that no
       consolidated chunk physically borders another one. Chunks in
       bins are kept in size order, with ties going to the
       approximately least recently used chunk.

       The chunks in each bin are maintained in decreasing sorted order by
       size.  This is irrelevant for the small bins, which all contain
       the same-sized chunks, but facilitates best-fit allocation for
       larger chunks. (These lists are just sequential. Keeping them in
       order almost never requires enough traversal to warrant using
       fancier ordered data structures.)  Chunks of the same size are
       linked with the most recently freed at the front, and allocations
       are taken from the back.  This results in LRU or FIFO allocation
       order, which tends to give each chunk an equal opportunity to be
       consolidated with adjacent freed chunks, resulting in larger free
       chunks and less fragmentation.

    * `top': The top-most available chunk (i.e., the one bordering the
       end of available memory) is treated specially. It is never
       included in any bin, is used only if no other chunk is
       available, and is released back to the system if it is very
       large (see M_TRIM_THRESHOLD).

    * `last_remainder': A bin holding only the remainder of the
       most recently split (non-top) chunk. This bin is checked
       before other non-fitting chunks, so as to provide better
       locality for runs of sequentially allocated chunks.

    *  Implicitly, through the host system's memory mapping tables.
       If supported, requests greater than a threshold are usually
       serviced via calls to mmap, and then later released via munmap.

*/

/*  sizes, alignments */

#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MINSIZE                (sizeof(struct malloc_chunk))

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* pad request bytes into a usable size */

#define request2size(req) \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))

/* Check if m has acceptable alignment */

#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
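
/*
 * Worked example (editor's sketch, not part of the original source):
 * on a build where INTERNAL_SIZE_T is 4 bytes, SIZE_SZ == 4,
 * MALLOC_ALIGNMENT == 8, MALLOC_ALIGN_MASK == 7 and MINSIZE == 16:
 *
 *   request2size(1)  : (1 + 4 + 7)  = 12 < 23  -> clamp to MINSIZE -> 16
 *   request2size(13) : (13 + 4 + 7) = 24 >= 23 -> 24 & ~7          -> 24
 *
 * i.e. every request pays SIZE_SZ bytes of header overhead and is then
 * rounded up to a multiple of MALLOC_ALIGNMENT, never below MINSIZE.
 */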




/*
  Physical chunk operations
*/


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */

#define IS_MMAPPED 0x2

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)


/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p)\
   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))


/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
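
/*
 * Navigation sketch (editor's illustration, not part of the original
 * source): p->size carries the chunk length plus flag bits, so masking
 * PREV_INUSE and stepping by the result lands on the next boundary tag.
 */
#if 0 /* illustration only */
static void walk_neighbours(mchunkptr p)
{
  mchunkptr next = next_chunk(p); /* p plus its own length */

  /* prev_chunk() is only meaningful while the previous chunk is free:
   * only then has that chunk stored its size in p->prev_size. */
  if (!(p->size & PREV_INUSE)) {
    mchunkptr prev = prev_chunk(p);
    assert(next_chunk(prev) == p);
  }
  (void)next;
}
#endif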




/*
  Dealing with use bits
*/

/* extract p's inuse bit */

#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* extract inuse bit of previous chunk */

#define prev_inuse(p)  ((p)->size & PREV_INUSE)

/* check for mmap()'ed chunk */

#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))




/*
  Dealing with size fields
*/

/* Get size, ignoring use bits */

#define chunksize(p)          ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s)        ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
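
/*
 * Boundary-tag sketch (editor's illustration, hypothetical helper):
 * freeing a chunk of size sz publishes sz at both ends, so either
 * neighbour can find the tag without scanning the heap.
 */
#if 0 /* illustration only */
static void mark_free(mchunkptr p, INTERNAL_SIZE_T sz)
{
  set_head(p, sz | PREV_INUSE);     /* head: our size, prev still in use */
  set_foot(p, sz);                  /* foot: lands in next->prev_size */
  clear_inuse_bit_at_offset(p, sz); /* tell the next chunk we are free */
}
#endif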





/*
   Bins

    The bins, `av_', are an array of pairs of pointers serving as the
    heads of (initially empty) doubly-linked lists of chunks, laid out
    in a way so that each pair can be treated as if it were in a
    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
    and chunks are the same).

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically
    spaced. (See the table below.) The `av_' array is never mentioned
    directly in the code, but instead via bin access macros.

    Bin layout:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.

    The special chunks `top' and `last_remainder' get their own bins,
    (this is implemented via yet more trickery with the av_ array),
    although `top' is never properly linked to its bin since it is
    always handled specially.

*/

#define NAV             128   /* number of bins */

typedef struct malloc_chunk* mbinptr;

/* access macros */

#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
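
/*
 * Layout sketch (editor's note): bin i owns the cell pair av_[2*i + 2]
 * (fd) and av_[2*i + 3] (bk).  bin_at() backs the pointer up by
 * 2*SIZE_SZ so those cells fall exactly where a chunk keeps its fd/bk
 * fields; with i == 3 (and pointers the size of INTERNAL_SIZE_T):
 *
 *   bin_at(3)     == (mbinptr)((char*)&av_[8] - 2*SIZE_SZ)
 *   bin_at(3)->fd == av_[8]   first chunk in bin 3
 *   bin_at(3)->bk == av_[9]   last chunk in bin 3
 *
 * The prev_size/size "fields" of such a bin header overlap the
 * previous bin's cells and must never be written.
 */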

/*
   The first 2 bins are never indexed. The corresponding av_ cells are instead
   used for bookkeeping. This is not to save space, but to simplify
   indexing, maintain locality, and avoid some initialization tests.
*/

#define top            (av_[2])          /* The topmost chunk */
#define last_remainder (bin_at(1))       /* remainder from last split */


/*
   Because top initially points to its own bin with initial
   zero size, thus forcing extension on the first malloc request,
   we avoid having any special code in malloc to check whether
   it even exists yet. But we still need to do so in malloc_extend_top.
*/

#define initial_top    ((mchunkptr)(bin_at(0)))

/* Helper macro to initialize bins */

#define IAV(i)  bin_at(i), bin_at(i)

static mbinptr av_[NAV * 2 + 2] = {
 NULL, NULL,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
};

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
static void malloc_init(void);
#endif

ulong mem_malloc_start = 0;
ulong mem_malloc_end = 0;
ulong mem_malloc_brk = 0;

static bool malloc_testing;	/* enable test mode */
static int malloc_max_allocs;	/* return NULL after this many calls to malloc() */
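
/*
 * Editor's sketch (hypothetical test usage): with UNIT_TEST enabled, a
 * test can make allocation fail deterministically by arming these two
 * variables (in-tree this is done through helper functions) and then
 * exercising the out-of-memory path:
 */
#if 0 /* illustration only */
	malloc_testing = true;
	malloc_max_allocs = 2;
	void *a = malloc(16);	/* succeeds */
	void *b = malloc(16);	/* succeeds */
	void *c = malloc(16);	/* returns NULL: budget exhausted */
#endif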

void *sbrk(ptrdiff_t increment)
{
	ulong old = mem_malloc_brk;
	ulong new = old + increment;

	/*
	 * if we are giving memory back make sure we clear it out since
	 * we set MORECORE_CLEARS to 1
	 */
	if (increment < 0)
		memset((void *)new, 0, -increment);

	if ((new < mem_malloc_start) || (new > mem_malloc_end))
		return (void *)MORECORE_FAILURE;

	mem_malloc_brk = new;

	return (void *)old;
}

void mem_malloc_init(ulong start, ulong size)
{
	mem_malloc_start = start;
	mem_malloc_end = start + size;
	mem_malloc_brk = start;

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
	malloc_init();
#endif

	debug("using memory %#lx-%#lx for malloc()\n", mem_malloc_start,
	      mem_malloc_end);
#if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT)
	memset((void *)mem_malloc_start, 0x0, size);
#endif
}
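
/*
 * Usage sketch (editor's illustration, hypothetical addresses): board
 * init code hands the allocator its arena exactly once; afterwards
 * malloc()/free() work and sbrk() moves mem_malloc_brk within
 * [start, start + size):
 */
#if 0 /* illustration only */
	mem_malloc_init(0x84000000, 16 << 20);	/* 16 MiB heap */
	void *p = malloc(256);
	free(p);
#endif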

/* field-extraction macros */

#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/*
  Indexing into bins
*/

#define bin_index(sz)                                                          \
(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
 ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
					  126)
/*
  bins for chunks < 512 are all spaced 8 bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN         63
#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8

#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)

/*
   Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
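
/*
 * Worked example (editor's sketch) of the bin mapping for a padded
 * request size nb:
 *
 *   smallbin_index(24)  ==  3                      24-byte chunks
 *   smallbin_index(504) == 63                      the last small bin
 *   bin_index(512)      == 56 + (512 >> 6)   ==  64
 *   bin_index(2048)     == 56 + (2048 >> 6)  ==  88
 *   bin_index(10240)    == 91 + (10240 >> 9) == 111
 *
 * is_small_request(nb) holds for nb < 504, i.e. when both bin nb/8 and
 * bin nb/8 + 1 are small bins, so malloc can take an exact fit without
 * a size check.
 */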



/*
   To help compensate for the large number of bins, a one-level index
   structure is used for bin-by-bin searching.  `binblocks' is a
   one-word bitvector recording whether groups of BINBLOCKWIDTH bins
   have any (possibly) non-empty bins, so they can be skipped over
   all at once during traversals. The bits are NOT always
   cleared as soon as all bins in a block are empty, but instead only
   when all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

#define binblocks_r     ((INTERNAL_SIZE_T)av_[1]) /* bitvector of nonempty blocks */
#define binblocks_w     (av_[1])

/* bin<->block macros */

#define idx2binblock(ix)    ((unsigned)1 << (ix / BINBLOCKWIDTH))
#define mark_binblock(ii)   (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii)))
#define clear_binblock(ii)  (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii))))
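
/*
 * Worked example (editor's sketch): bins are grouped four to a block,
 * so bin 64 (the first 64-byte-spaced bin) belongs to block 64/4 == 16:
 *
 *   idx2binblock(64) == 1u << 16
 *
 * mark_binblock(64) sets that bit in av_[1] when any of bins 64..67
 * gains a chunk; malloc's scan skips whole blocks whose bit is clear.
 */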
wdenk217c9da2002-10-25 20:35:49 +0000697
698
Simon Glass7471cc72014-07-10 22:23:25 -0600699
wdenk217c9da2002-10-25 20:35:49 +0000700
701
702/* Other static bookkeeping data */
703
704/* variables holding tunable values */
705
706static unsigned long trim_threshold = DEFAULT_TRIM_THRESHOLD;
707static unsigned long top_pad = DEFAULT_TOP_PAD;
708static unsigned int n_mmaps_max = DEFAULT_MMAP_MAX;
709static unsigned long mmap_threshold = DEFAULT_MMAP_THRESHOLD;
710
711/* The first value returned from sbrk */
712static char* sbrk_base = (char*)(-1);
713
714/* The maximum memory obtained from system via sbrk */
715static unsigned long max_sbrked_mem = 0;
716
717/* The maximum via either sbrk or mmap */
718static unsigned long max_total_mem = 0;
719
720/* internal working copy of mallinfo */
721static struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
722
723/* The total memory obtained from system via sbrk */
724#define sbrked_mem (current_mallinfo.arena)
725
726/* Tracking mmaps */
727
Wolfgang Denk460a9ff2010-06-20 23:33:59 +0200728#ifdef DEBUG
wdenk217c9da2002-10-25 20:35:49 +0000729static unsigned int n_mmaps = 0;
Wolfgang Denk460a9ff2010-06-20 23:33:59 +0200730#endif /* DEBUG */
wdenk217c9da2002-10-25 20:35:49 +0000731static unsigned long mmapped_mem = 0;
732#if HAVE_MMAP
733static unsigned int max_n_mmaps = 0;
734static unsigned long max_mmapped_mem = 0;
735#endif
736
Marek Bykowskib4032a72020-04-29 18:23:07 +0200737#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
738static void malloc_init(void)
739{
740 int i, j;
741
742 debug("bins (av_ array) are at %p\n", (void *)av_);
743
744 av_[0] = NULL; av_[1] = NULL;
745 for (i = 2, j = 2; i < NAV * 2 + 2; i += 2, j++) {
746 av_[i] = bin_at(j - 2);
747 av_[i + 1] = bin_at(j - 2);
Simon Glass7471cc72014-07-10 22:23:25 -0600748
Marek Bykowskib4032a72020-04-29 18:23:07 +0200749 /* Just print the first few bins so that
750 * we can see there are alright.
751 */
752 if (i < 10)
753 debug("av_[%d]=%lx av_[%d]=%lx\n",
754 i, (ulong)av_[i],
755 i + 1, (ulong)av_[i + 1]);
756 }
757
758 /* Init the static bookkeeping as well */
759 sbrk_base = (char *)(-1);
760 max_sbrked_mem = 0;
761 max_total_mem = 0;
762#ifdef DEBUG
763 memset((void *)&current_mallinfo, 0, sizeof(struct mallinfo));
764#endif
765}
766#endif

/*
  Debugging support
*/

#ifdef DEBUG


/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if __STD_C
static void do_check_chunk(mchunkptr p)
#else
static void do_check_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* No checkable chunk is mmapped */
  assert(!chunk_is_mmapped(p));

  /* Check for legal address ... */
  assert((char*)p >= sbrk_base);
  if (p != top)
    assert((char*)p + sz <= (char*)top);
  else
    assert((char*)p + sz <= sbrk_base + sbrked_mem);

}


#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  assert(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert (next == top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}

#if __STD_C
static void do_check_inuse_chunk(mchunkptr p)
#else
static void do_check_inuse_chunk(p) mchunkptr p;
#endif
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);

}

#if __STD_C
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = sz - s;

  do_check_inuse_chunk(p);

  /* Legal size ... */
  assert((long)sz >= (long)MINSIZE);
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert(room >= 0);
  assert(room < (long)MINSIZE);

  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));


  /* ... and was allocated at front of an available chunk */
  assert(prev_inuse(p));

}


#define check_free_chunk(P)  do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif


/*
  Macro-based internal utilities
*/


/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/


#define frontlink(P, S, IDX, BK, FD)                                          \
{                                                                             \
  if (S < MAX_SMALLBIN_SIZE)                                                  \
  {                                                                           \
    IDX = smallbin_index(S);                                                  \
    mark_binblock(IDX);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    IDX = bin_index(S);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    if (FD == BK) mark_binblock(IDX);                                         \
    else                                                                      \
    {                                                                         \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
      BK = FD->bk;                                                            \
    }                                                                         \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
}


/* take a chunk off a list */

#define unlink(P, BK, FD)                                                     \
{                                                                             \
  BK = P->bk;                                                                 \
  FD = P->fd;                                                                 \
  FD->bk = BK;                                                                \
  BK->fd = FD;                                                                \
}                                                                             \

/* Place p as the last remainder */

#define link_last_remainder(P)                                                \
{                                                                             \
  last_remainder->fd = last_remainder->bk = P;                                \
  P->fd = P->bk = last_remainder;                                             \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)
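
/*
 * Usage sketch (editor's note): these macros assign to their BK/FD/IDX
 * arguments and evaluate P and S repeatedly, so callers pass plain
 * local variables, exactly as the allocator itself does:
 */
#if 0 /* illustration only */
	mchunkptr bck, fwd;	/* scratch lvalues the macros write into */
	int idx;

	unlink(victim, bck, fwd);		/* remove victim from its bin */
	frontlink(p, sz, idx, bck, fwd);	/* file p under its size bin */
#endif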




/* Routines dealing with mmap(). */

#if HAVE_MMAP

#if __STD_C
static mchunkptr mmap_chunk(size_t size)
#else
static mchunkptr mmap_chunk(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;

#ifndef MAP_ANONYMOUS
  static int fd = -1;
#endif

  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */

  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
  size = (size + SIZE_SZ + page_mask) & ~page_mask;

#ifdef MAP_ANONYMOUS
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#else /* !MAP_ANONYMOUS */
  if (fd < 0)
  {
    fd = open("/dev/zero", O_RDWR);
    if(fd < 0) return 0;
  }
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if(p == (mchunkptr)-1) return 0;

  n_mmaps++;
  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;

  /* We demand that eight bytes into a page must be 8-byte aligned. */
  assert(aligned_OK(chunk2mem(p)));

  /* The offset to the start of the mmapped region is stored
   * in the prev_size field of the chunk; normally it is zero,
   * but that can be changed in memalign().
   */
  p->prev_size = 0;
  set_head(p, size|IS_MMAPPED);

  mmapped_mem += size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#if __STD_C
static void munmap_chunk(mchunkptr p)
#else
static void munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);
  int ret;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);

  n_mmaps--;
  mmapped_mem -= (size + p->prev_size);

  ret = munmap((char *)p - p->prev_size, size + p->prev_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}

#if HAVE_MREMAP

#if __STD_C
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
#else
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((size + offset) & (malloc_getpagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;

  cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);

  if (cp == (char *)-1) return 0;

  p = (mchunkptr)(cp + offset);

  assert(aligned_OK(chunk2mem(p)));

  assert((p->prev_size == offset));
  set_head(p, (new_size - offset)|IS_MMAPPED);

  mmapped_mem -= size + offset;
  mmapped_mem += new_size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#endif /* HAVE_MREMAP */

#endif /* HAVE_MMAP */

/*
  Extend the top-most chunk by obtaining memory from system.
  Main interface to sbrk (but see also malloc_trim).
*/

#if __STD_C
static void malloc_extend_top(INTERNAL_SIZE_T nb)
#else
static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
#endif
{
  char*     brk;                  /* return value from sbrk */
  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
  INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
  char*     new_brk;              /* return of 2nd sbrk call */
  INTERNAL_SIZE_T top_size;       /* new size of top chunk */

  mchunkptr old_top = top;        /* Record state of old top */
  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
  char*     old_end = (char*)(chunk_at_offset(old_top, old_top_size));

  /* Pad request with top_pad plus minimal overhead */

  INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;
  unsigned long pagesz = malloc_getpagesize;

  /* If not the first time through, round to preserve page boundary */
  /* Otherwise, we need to correct to a page size below anyway. */
  /* (We also correct below if an intervening foreign sbrk call.) */

  if (sbrk_base != (char*)(-1))
    sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);

  brk = (char*)(MORECORE (sbrk_size));

  /* Fail if sbrk failed or if a foreign sbrk call killed our space */
  if (brk == (char*)(MORECORE_FAILURE) ||
      (brk < old_end && old_top != initial_top))
    return;

  sbrked_mem += sbrk_size;

  if (brk == old_end) /* can just add bytes to current top */
  {
    top_size = sbrk_size + old_top_size;
    set_head(top, top_size | PREV_INUSE);
  }
  else
  {
    if (sbrk_base == (char*)(-1)) /* First time through. Record base */
      sbrk_base = brk;
    else /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
      sbrked_mem += brk - (char*)old_end;

    /* Guarantee alignment of first new chunk made from this space */
    front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
    if (front_misalign > 0)
    {
      correction = (MALLOC_ALIGNMENT) - front_misalign;
      brk += correction;
    }
    else
      correction = 0;

    /* Guarantee the next brk will be at a page boundary */

    correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
		   ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));

    /* Allocate correction */
    new_brk = (char*)(MORECORE (correction));
    if (new_brk == (char*)(MORECORE_FAILURE)) return;

    sbrked_mem += correction;

    top = (mchunkptr)brk;
    top_size = new_brk - brk + correction;
    set_head(top, top_size | PREV_INUSE);

    if (old_top != initial_top)
    {

      /* There must have been an intervening foreign sbrk call. */
      /* A double fencepost is necessary to prevent consolidation */

      /* If not enough space to do this, then user did something very wrong */
      if (old_top_size < MINSIZE)
      {
	set_head(top, PREV_INUSE); /* will force null return from malloc */
	return;
      }

      /* Also keep size a multiple of MALLOC_ALIGNMENT */
      old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
      set_head_size(old_top, old_top_size);
      chunk_at_offset(old_top, old_top_size          )->size =
	SIZE_SZ|PREV_INUSE;
      chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
	SIZE_SZ|PREV_INUSE;
      /* If possible, release the rest. */
      if (old_top_size >= MINSIZE)
	fREe(chunk2mem(old_top));
    }
  }

  if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
    max_sbrked_mem = sbrked_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;

  /* We always land on a page boundary */
  assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
}



/* Main public routines */


/*
  Malloc Algorithm:

    The requested size is first converted into a usable form, `nb'.
    This currently means to add 4 bytes overhead plus possibly more to
    obtain 8-byte alignment and/or to obtain a size of at least
    MINSIZE (currently 16 bytes), the smallest allocatable size.
    (All fits are considered `exact' if they are within MINSIZE bytes.)

    From there, the first of the following steps that succeeds is taken:

      1. The bin corresponding to the request size is scanned, and if
	 a chunk of exactly the right size is found, it is taken.

      2. The most recently remaindered chunk is used if it is big
	 enough.  This is a form of (roving) first fit, used only in
	 the absence of exact fits.  Runs of consecutive requests use
	 the remainder of the chunk used for the previous such request
	 whenever possible.  This limited use of a first-fit style
	 allocation strategy tends to give contiguous chunks
	 coextensive lifetimes, which improves locality and can reduce
	 fragmentation in the long run.

      3. Other bins are scanned in increasing size order, using a
	 chunk big enough to fulfill the request, and splitting off
	 any remainder.  This search is strictly by best-fit; i.e.,
	 the smallest (with ties going to approximately the least
	 recently used) chunk that fits is selected.

      4. If large enough, the chunk bordering the end of memory
	 (`top') is split off. (This use of `top' is in accord with
	 the best-fit search rule.  In effect, `top' is treated as
	 larger (and thus less well fitting) than any other available
	 chunk since it can be extended to be as large as necessary
	 (up to system limitations).

      5. If the request size meets the mmap threshold and the
	 system supports mmap, and there are few enough currently
	 allocated mmapped regions, and a call to mmap succeeds,
	 the request is allocated via direct memory mapping.

      6. Otherwise, the top of memory is extended by
	 obtaining more space from the system (normally using sbrk,
	 but definable to anything else via the MORECORE macro).
	 Memory is gathered from the system (in system page-sized
	 units) in a way that allows chunks obtained across different
	 sbrk calls to be consolidated, but does not require
	 contiguous memory. Thus, it should be safe to intersperse
	 mallocs with other sbrk calls.


    All allocations are made from the `lowest' part of any found
    chunk. (The implementation invariant is that prev_inuse is
    always true of any allocated chunk; i.e., that each allocated
    chunk borders either a previously allocated and still in-use chunk,
    or the base of its memory arena.)

*/

STATIC_IF_MCHECK
#if __STD_C
Void_t* mALLOc_impl(size_t bytes)
#else
Void_t* mALLOc_impl(bytes) size_t bytes;
#endif
{
  mchunkptr victim;                  /* inspected/selected chunk */
  INTERNAL_SIZE_T victim_size;       /* its size */
  int       idx;                     /* index for bin traversal */
  mbinptr   bin;                     /* associated bin */
  mchunkptr remainder;               /* remainder from a split */
  long      remainder_size;          /* its size */
  int       remainder_index;         /* its bin index */
  unsigned long block;               /* block traverser bit */
  int       startidx;                /* first bin of a traversed block */
  mchunkptr fwd;                     /* misc temp for linking */
  mchunkptr bck;                     /* misc temp for linking */
  mbinptr q;                         /* misc temp */

  INTERNAL_SIZE_T nb;

#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
  if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
    return malloc_simple(bytes);
#endif

  if (CONFIG_IS_ENABLED(UNIT_TEST) && malloc_testing) {
    if (--malloc_max_allocs < 0)
      return NULL;
  }

  /* check if mem_malloc_init() was run */
  if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) {
    /* not initialized yet */
    return NULL;
  }

  if ((long)bytes < 0) return NULL;

  nb = request2size(bytes);  /* padded request size; */

  /* Check for exact match in a bin */

  if (is_small_request(nb))  /* Faster version for small requests */
  {
    idx = smallbin_index(nb);

    /* No traversal or size check necessary for small bins.  */

    q = bin_at(idx);
    victim = last(q);

    /* Also scan the next one, since it would have a remainder < MINSIZE */
    if (victim == q)
    {
      q = next_bin(q);
      victim = last(q);
    }
    if (victim != q)
    {
      victim_size = chunksize(victim);
      unlink(victim, bck, fwd);
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
      return chunk2mem(victim);
    }

    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */

  }
  else
  {
    idx = bin_index(nb);
    bin = bin_at(idx);

    for (victim = last(bin); victim != bin; victim = victim->bk)
    {
      victim_size = chunksize(victim);
      remainder_size = victim_size - nb;

      if (remainder_size >= (long)MINSIZE) /* too big */
      {
	--idx; /* adjust to rescan below after checking last remainder */
	break;
      }

      else if (remainder_size >= 0) /* exact fit */
      {
	unlink(victim, bck, fwd);
	set_inuse_bit_at_offset(victim, victim_size);
	check_malloced_chunk(victim, nb);
	VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
	return chunk2mem(victim);
      }
    }

    ++idx;

  }

  /* Try to use the last split-off remainder */

  if ( (victim = last_remainder->fd) != last_remainder)
  {
    victim_size = chunksize(victim);
    remainder_size = victim_size - nb;

    if (remainder_size >= (long)MINSIZE) /* re-split */
    {
      remainder = chunk_at_offset(victim, nb);
      set_head(victim, nb | PREV_INUSE);
      link_last_remainder(remainder);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_foot(remainder, remainder_size);
      check_malloced_chunk(victim, nb);
      VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
      return chunk2mem(victim);
    }

    clear_last_remainder;

    if (remainder_size >= 0)  /* exhaust */
    {
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
      return chunk2mem(victim);
    }

    /* Else place in bin */

    frontlink(victim, victim_size, remainder_index, bck, fwd);
  }

  /*
     If there are any possibly nonempty big-enough blocks,
     search for best fitting chunk by scanning bins in blockwidth units.
  */

  if ( (block = idx2binblock(idx)) <= binblocks_r)
  {

    /* Get to the first marked block */

    if ( (block & binblocks_r) == 0)
    {
      /* force to an even block boundary */
      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
      block <<= 1;
      while ((block & binblocks_r) == 0)
      {
	idx += BINBLOCKWIDTH;
	block <<= 1;
      }
    }

    /* For each possibly nonempty block ... */
    for (;;)
    {
      startidx = idx;          /* (track incomplete blocks) */
      q = bin = bin_at(idx);

      /* For each bin in this block ... */
      do
      {
	/* Find and use first big enough chunk ... */

	for (victim = last(bin); victim != bin; victim = victim->bk)
	{
	  victim_size = chunksize(victim);
	  remainder_size = victim_size - nb;

	  if (remainder_size >= (long)MINSIZE) /* split */
	  {
	    remainder = chunk_at_offset(victim, nb);
	    set_head(victim, nb | PREV_INUSE);
	    unlink(victim, bck, fwd);
	    link_last_remainder(remainder);
	    set_head(remainder, remainder_size | PREV_INUSE);
	    set_foot(remainder, remainder_size);
	    check_malloced_chunk(victim, nb);
	    VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
	    return chunk2mem(victim);
	  }

	  else if (remainder_size >= 0)  /* take */
	  {
	    set_inuse_bit_at_offset(victim, victim_size);
	    unlink(victim, bck, fwd);
	    check_malloced_chunk(victim, nb);
	    VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
	    return chunk2mem(victim);
	  }

	}

	bin = next_bin(bin);

      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);

      /* Clear out the block bit. */

      do   /* Possibly backtrack to try to clear a partial block */
      {
	if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
	{
	  av_[1] = (mbinptr)(binblocks_r & ~block);
	  break;
	}
	--startidx;
	q = prev_bin(q);
      } while (first(q) == q);

      /* Get to the next possibly nonempty block */

      if ( (block <<= 1) <= binblocks_r && (block != 0) )
      {
	while ((block & binblocks_r) == 0)
	{
	  idx += BINBLOCKWIDTH;
	  block <<= 1;
	}
      }
      else
	break;
    }
  }


  /* Try to use top chunk */

  /* Require that there be a remainder, ensuring top always exists  */
  if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
  {

#if HAVE_MMAP
    /* If big and would otherwise need to extend, try to use mmap instead */
    if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
	(victim = mmap_chunk(nb))) {
      VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
      return chunk2mem(victim);
    }
#endif

    /* Try to extend */
    malloc_extend_top(nb);
    if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
      return NULL; /* propagate failure */
  }

  victim = top;
  set_head(victim, nb | PREV_INUSE);
  top = chunk_at_offset(victim, nb);
  set_head(top, remainder_size | PREV_INUSE);
  check_malloced_chunk(victim, nb);
  VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
  return chunk2mem(victim);

}


/*

  free() algorithm :

    cases:

       1. free(0) has no effect.

       2. If the chunk was allocated via mmap, it is released via munmap().

       3. If a returned chunk borders the current high end of memory,
	  it is consolidated into the top, and if the total unused
	  topmost memory exceeds the trim threshold, malloc_trim is
	  called.

       4. Other chunks are consolidated as they arrive, and
	  placed in corresponding bins. (This includes the case of
	  consolidating with the current `last_remainder').

*/

STATIC_IF_MCHECK
#if __STD_C
void fREe_impl(Void_t* mem)
#else
void fREe_impl(mem) Void_t* mem;
#endif
{
  mchunkptr p;            /* chunk corresponding to mem */
  INTERNAL_SIZE_T hd;     /* its head field */
  INTERNAL_SIZE_T sz;     /* its size */
  int idx;                /* its bin index */
  mchunkptr next;         /* next contiguous chunk */
  INTERNAL_SIZE_T nextsz; /* its size */
  INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
  mchunkptr bck;          /* misc temp for linking */
  mchunkptr fwd;          /* misc temp for linking */
  int islr;               /* track whether merging with last_remainder */

#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
        /* free() is a no-op - all the memory will be freed on relocation */
        if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
                VALGRIND_FREELIKE_BLOCK(mem, SIZE_SZ);
                return;
        }
#endif

  if (mem == NULL)                           /* free(0) has no effect */
    return;

  p = mem2chunk(mem);
  hd = p->size;

#if HAVE_MMAP
  if (hd & IS_MMAPPED)                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  check_inuse_chunk(p);

  sz = hd & ~PREV_INUSE;
  next = chunk_at_offset(p, sz);
  nextsz = chunksize(next);
  VALGRIND_FREELIKE_BLOCK(mem, SIZE_SZ);

  if (next == top)                           /* merge with top */
  {
    sz += nextsz;

    if (!(hd & PREV_INUSE))                  /* consolidate backward */
    {
      prevsz = p->prev_size;
      p = chunk_at_offset(p, -((long) prevsz));
      sz += prevsz;
      unlink(p, bck, fwd);
    }

    set_head(p, sz | PREV_INUSE);
    top = p;
    if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
      malloc_trim(top_pad);
    return;
  }

  set_head(next, nextsz);                    /* clear inuse bit */

  islr = 0;

  if (!(hd & PREV_INUSE))                    /* consolidate backward */
  {
    prevsz = p->prev_size;
    p = chunk_at_offset(p, -((long) prevsz));
    sz += prevsz;

    if (p->fd == last_remainder)             /* keep as last_remainder */
      islr = 1;
    else
      unlink(p, bck, fwd);
  }

  if (!(inuse_bit_at_offset(next, nextsz)))  /* consolidate forward */
  {
    sz += nextsz;

    if (!islr && next->fd == last_remainder) /* re-insert last_remainder */
    {
      islr = 1;
      link_last_remainder(p);
    }
    else
      unlink(next, bck, fwd);
  }


  set_head(p, sz | PREV_INUSE);
  set_foot(p, sz);
  if (!islr)
    frontlink(p, sz, idx, bck, fwd);
}
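
/*
  Usage sketch (illustrative only, not compiled into the build): free(NULL)
  is a no-op (case 1 above), and since the old convention of handing the
  last-freed chunk back to realloc is unsupported, clearing the pointer
  after free() is a cheap guard against accidental reuse. The helper name
  is hypothetical.
*/
#if 0
static void example_use_free(void)
{
  void *p = malloc(128);

  free(p);      /* fine even if p is NULL */
  p = NULL;     /* guard against use-after-free / double free */
}
#endif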




/*

  Realloc algorithm:

  Chunks that were obtained via mmap cannot be extended or shrunk
  unless HAVE_MREMAP is defined, in which case mremap is used.
  Otherwise, if their reallocation is for additional space, they are
  copied. If for less, they are just left alone.

  Otherwise, if the reallocation is for additional space, and the
  chunk can be extended, it is, else a malloc-copy-free sequence is
  taken. There are several different ways that a chunk could be
  extended. All are tried:

   * Extending forward into following adjacent free chunk.
   * Shifting backwards, joining preceding adjacent space
   * Both shifting backwards and extending forward.
   * Extending into newly sbrked space

  Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
  size argument of zero (re)allocates a minimum-sized chunk.

  If the reallocation is for less space, and the new request is for
  a `small' (<512 bytes) size, then the newly unused space is lopped
  off and freed.

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is no longer supported.
  I don't know of any programs still relying on this feature,
  and allowing it would also allow too many other incorrect
  usages of realloc to be sensible.


*/

STATIC_IF_MCHECK
#if __STD_C
Void_t* rEALLOc_impl(Void_t* oldmem, size_t bytes)
#else
Void_t* rEALLOc_impl(oldmem, bytes) Void_t* oldmem; size_t bytes;
#endif
{
  INTERNAL_SIZE_T nb;              /* padded request size */

  mchunkptr oldp;                  /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T oldsize;         /* its size */

  mchunkptr newp;                  /* chunk to return */
  INTERNAL_SIZE_T newsize;         /* its size */
  Void_t* newmem;                  /* corresponding user mem */

  mchunkptr next;                  /* next contiguous chunk after oldp */
  INTERNAL_SIZE_T nextsize;        /* its size */

  mchunkptr prev;                  /* previous contiguous chunk before oldp */
  INTERNAL_SIZE_T prevsize;        /* its size */

  mchunkptr remainder;             /* holds split off extra space from newp */
  INTERNAL_SIZE_T remainder_size;  /* its size */

  mchunkptr bck;                   /* misc temp for linking */
  mchunkptr fwd;                   /* misc temp for linking */

#ifdef REALLOC_ZERO_BYTES_FREES
  if (!bytes) {
        fREe_impl(oldmem);
        return NULL;
  }
#endif

  if ((long)bytes < 0) return NULL;

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == NULL) return mALLOc_impl(bytes);

#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
        if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
                /* This is harder to support and should not be needed */
                panic("pre-reloc realloc() is not supported");
        }
#endif

  newp    = oldp    = mem2chunk(oldmem);
  newsize = oldsize = chunksize(oldp);


  nb = request2size(bytes);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp))
  {
#if HAVE_MREMAP
    newp = mremap_chunk(oldp, nb);
    if(newp) return chunk2mem(newp);
#endif
    /* Note the extra SIZE_SZ overhead. */
    if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
    /* Must alloc, copy, free. */
    newmem = mALLOc_impl(bytes);
    if (!newmem)
        return NULL; /* propagate failure */
    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
    munmap_chunk(oldp);
    return newmem;
  }
#endif

  check_inuse_chunk(oldp);

  if ((long)(oldsize) < (long)(nb))
  {

    /* Try expanding forward */

    next = chunk_at_offset(oldp, oldsize);
    if (next == top || !inuse(next))
    {
      nextsize = chunksize(next);

      /* Forward into top only if a remainder */
      if (next == top)
      {
        if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
        {
          newsize += nextsize;
          top = chunk_at_offset(oldp, nb);
          set_head(top, (newsize - nb) | PREV_INUSE);
          set_head_size(oldp, nb);
          VALGRIND_RESIZEINPLACE_BLOCK(chunk2mem(oldp), 0, bytes, SIZE_SZ);
          VALGRIND_MAKE_MEM_DEFINED(chunk2mem(oldp), bytes);
          return chunk2mem(oldp);
        }
      }

      /* Forward into next chunk */
      else if (((long)(nextsize + newsize) >= (long)(nb)))
      {
        unlink(next, bck, fwd);
        newsize += nextsize;
        VALGRIND_RESIZEINPLACE_BLOCK(chunk2mem(oldp), 0, bytes, SIZE_SZ);
        VALGRIND_MAKE_MEM_DEFINED(chunk2mem(oldp), bytes);
        goto split;
      }
    }
    else
    {
      next = NULL;
      nextsize = 0;
    }

    /* Try shifting backwards. */

    if (!prev_inuse(oldp))
    {
      prev = prev_chunk(oldp);
      prevsize = chunksize(prev);

      /* try forward + backward first to save a later consolidation */

      if (next != NULL)
      {
        /* into top */
        if (next == top)
        {
          if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
          {
            unlink(prev, bck, fwd);
            newp = prev;
            newsize += prevsize + nextsize;
            newmem = chunk2mem(newp);
            VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
            MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
            top = chunk_at_offset(newp, nb);
            set_head(top, (newsize - nb) | PREV_INUSE);
            set_head_size(newp, nb);
            VALGRIND_FREELIKE_BLOCK(oldmem, SIZE_SZ);
            return newmem;
          }
        }

        /* into next chunk */
        else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
        {
          unlink(next, bck, fwd);
          unlink(prev, bck, fwd);
          newp = prev;
          newsize += nextsize + prevsize;
          newmem = chunk2mem(newp);
          VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
          MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
          goto split;
        }
      }

      /* backward only */
      if (prev != NULL && (long)(prevsize + newsize) >= (long)nb)
      {
        unlink(prev, bck, fwd);
        newp = prev;
        newsize += prevsize;
        newmem = chunk2mem(newp);
        VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
        MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
        goto split;
      }
    }

    /* Must allocate */

    newmem = mALLOc_impl (bytes);

    if (newmem == NULL)  /* propagate failure */
      return NULL;

    /* Avoid copy if newp is next chunk after oldp. */
    /* (This can only happen when new chunk is sbrk'ed.) */

    if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
    {
      newsize += chunksize(newp);
      newp = oldp;
      goto split;
    }

    /* Otherwise copy, free, and exit */
    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
    fREe_impl(oldmem);
    return newmem;
  } else {
    VALGRIND_RESIZEINPLACE_BLOCK(oldmem, 0, bytes, SIZE_SZ);
    VALGRIND_MAKE_MEM_DEFINED(oldmem, bytes);
  }


 split:  /* split off extra room in old or expanded chunk */

  if (newsize - nb >= MINSIZE) /* split off remainder */
  {
    remainder = chunk_at_offset(newp, nb);
    remainder_size = newsize - nb;
    set_head_size(newp, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_inuse_bit_at_offset(remainder, remainder_size);
    VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ,
                              false);
    fREe_impl(chunk2mem(remainder)); /* let free() deal with it */
  }
  else
  {
    set_head_size(newp, newsize);
    set_inuse_bit_at_offset(newp, newsize);
  }

  check_inuse_chunk(newp);
  return chunk2mem(newp);
}
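
/*
  Usage sketch (illustrative only, not compiled into the build): because a
  failed realloc returns NULL but leaves the old block valid, assigning the
  result through a temporary avoids leaking the original allocation. The
  helper name is hypothetical; the behaviour follows the algorithm comment
  above.
*/
#if 0
static void *example_grow(void *p, size_t newsize)
{
  void *tmp = realloc(p, newsize);

  if (!tmp) {
    free(p);    /* old block is still valid; release it ourselves */
    return NULL;
  }
  return tmp;   /* may equal p if the chunk was extended in place */
}
#endif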




/*

  memalign algorithm:

  memalign requests more than enough space from malloc, finds a spot
  within that chunk that meets the alignment request, and then
  possibly frees the leading and trailing space.

  The alignment argument must be a power of two. This property is not
  checked by memalign, so misuse may result in random runtime errors.

  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.

*/

STATIC_IF_MCHECK
#if __STD_C
Void_t* mEMALIGn_impl(size_t alignment, size_t bytes)
#else
Void_t* mEMALIGn_impl(alignment, bytes) size_t alignment; size_t bytes;
#endif
{
  INTERNAL_SIZE_T nb;        /* padded request size */
  char* m;                   /* memory returned by malloc call */
  mchunkptr p;               /* corresponding chunk */
  char* brk;                 /* alignment point within p */
  mchunkptr newp;            /* chunk to return */
  INTERNAL_SIZE_T newsize;   /* its size */
  INTERNAL_SIZE_T leadsize;  /* leading space before alignment point */
  mchunkptr remainder;       /* spare room at end to split off */
  long remainder_size;       /* its size */

  if ((long)bytes < 0) return NULL;

#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
        if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
                return memalign_simple(alignment, bytes);
        }
#endif

  /* If need less alignment than we give anyway, just relay to malloc */

  if (alignment <= MALLOC_ALIGNMENT) return mALLOc_impl(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */

  if (alignment < MINSIZE) alignment = MINSIZE;

  /* Call malloc with worst case padding to hit alignment. */

  nb = request2size(bytes);
  m = (char*)(mALLOc_impl(nb + alignment + MINSIZE));

  /*
   * The attempt to over-allocate (with a size large enough to guarantee the
   * ability to find an aligned region within allocated memory) failed.
   *
   * Try again, this time only allocating exactly the size the user wants. If
   * the allocation now succeeds and just happens to be aligned, we can still
   * fulfill the user's request.
   */
  if (m == NULL) {
    size_t extra, extra2;
    /*
     * Use bytes not nb, since mALLOc internally calls request2size too, and
     * each call increases the size to allocate, to account for the header.
     */
    m = (char*)(mALLOc_impl(bytes));
    /* Aligned -> return it */
    if ((((unsigned long)(m)) % alignment) == 0)
      return m;
    /*
     * Otherwise, try again, requesting enough extra space to be able to
     * acquire alignment.
     */
    fREe_impl(m);
    /* Add in extra bytes to match misalignment of unexpanded allocation */
    extra = alignment - (((unsigned long)(m)) % alignment);
    m = (char*)(mALLOc_impl(bytes + extra));
    /*
     * m might not be the same as before. Validate that the previous value of
     * extra still works for the current value of m.
     * If (!m), the if (m) guard below is skipped and the NULL propagates to
     * the failure check that follows.
     */
    if (m) {
      extra2 = alignment - (((unsigned long)(m)) % alignment);
      if (extra2 > extra) {
        fREe_impl(m);
        m = NULL;
      }
    }
    /* Fall through to original NULL check and chunk splitting logic */
  }

  if (m == NULL) return NULL; /* propagate failure */

  p = mem2chunk(m);

  if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
  {
#if HAVE_MMAP
    if(chunk_is_mmapped(p))
      return chunk2mem(p); /* nothing more to do */
#endif
  }
  else /* misaligned */
  {
    /*
      Find an aligned spot inside chunk.
      Since we need to give back leading space in a chunk of at
      least MINSIZE, if the first calculation places us at
      a spot with less than MINSIZE leader, we can move to the
      next aligned spot -- we've allocated enough total room so that
      this is always possible.
    */

    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment));
    if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;

    newp = (mchunkptr)brk;
    leadsize = brk - (char*)(p);
    newsize = chunksize(p) - leadsize;

#if HAVE_MMAP
    if(chunk_is_mmapped(p))
    {
      newp->prev_size = p->prev_size + leadsize;
      set_head(newp, newsize|IS_MMAPPED);
      return chunk2mem(newp);
    }
#endif

    /* give back leader, use the rest */

    set_head(newp, newsize | PREV_INUSE);
    set_inuse_bit_at_offset(newp, newsize);
    set_head_size(p, leadsize);
    fREe_impl(chunk2mem(p));
    p = newp;
    VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(p), bytes, SIZE_SZ, false);

    assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
  }

  /* Also give back spare room at the end */

  remainder_size = chunksize(p) - nb;

  if (remainder_size >= (long)MINSIZE)
  {
    remainder = chunk_at_offset(p, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_head_size(p, nb);
    VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ,
                              false);
    fREe_impl(chunk2mem(remainder));
  }

  check_inuse_chunk(p);
  return chunk2mem(p);

}
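
/*
  Usage sketch (illustrative only, not compiled into the build): the
  alignment must be a power of two, and values of 8 or less are simply
  relayed to malloc. The 4096-byte alignment below is an arbitrary example;
  the helper name is hypothetical.
*/
#if 0
static void example_use_memalign(void)
{
  void *buf = memalign(4096, 2048);

  if (buf) {
    assert(((unsigned long)buf & (4096 - 1)) == 0);
    free(buf);  /* memalign'ed blocks are freed normally */
  }
}
#endif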



/*
  valloc just invokes memalign with alignment argument equal
  to the page size of the system (or as near to this as can
  be figured out from all the includes/defines above.)
*/

#if __STD_C
Void_t* vALLOc(size_t bytes)
#else
Void_t* vALLOc(bytes) size_t bytes;
#endif
{
  return mEMALIGn (malloc_getpagesize, bytes);
}

/*
  pvalloc just invokes valloc for the nearest pagesize
  that will accommodate the request
*/


#if __STD_C
Void_t* pvALLOc(size_t bytes)
#else
Void_t* pvALLOc(bytes) size_t bytes;
#endif
{
  size_t pagesize = malloc_getpagesize;
  return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
}
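
/*
  Usage sketch (illustrative only, not compiled into the build): valloc(n)
  returns n bytes page-aligned, while pvalloc(n) also rounds the size itself
  up to a whole number of pages, as the rounding expression above shows.
  The helper name is hypothetical.
*/
#if 0
static void example_use_pvalloc(void)
{
  void *a = valloc(100);   /* page-aligned, usable size >= 100 */
  void *b = pvalloc(100);  /* page-aligned, size rounded up to a full page */

  free(a);
  free(b);
}
#endif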

/*

  calloc calls malloc, then zeroes out the allocated chunk.

*/

STATIC_IF_MCHECK
#if __STD_C
Void_t* cALLOc_impl(size_t n, size_t elem_size)
#else
Void_t* cALLOc_impl(n, elem_size) size_t n; size_t elem_size;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T csz;

  INTERNAL_SIZE_T sz = n * elem_size;


  /* check if expand_top called, in which case don't need to clear */
#if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT)
#if MORECORE_CLEARS
  mchunkptr oldtop = top;
  INTERNAL_SIZE_T oldtopsize = chunksize(top);
#endif
#endif
  Void_t* mem = mALLOc_impl (sz);

  if ((long)n < 0) return NULL;

  if (mem == NULL)
    return NULL;
  else
  {
#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
        if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
                memset(mem, 0, sz);
                return mem;
        }
#endif
    p = mem2chunk(mem);

    /* Two optional cases in which clearing not necessary */


#if HAVE_MMAP
    if (chunk_is_mmapped(p)) return mem;
#endif

    csz = chunksize(p);

#if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT)
#if MORECORE_CLEARS
    if (p == oldtop && csz > oldtopsize)
    {
      /* clear only the bytes from non-freshly-sbrked memory */
      csz = oldtopsize;
    }
#endif
#endif

    MALLOC_ZERO(mem, csz - SIZE_SZ);
    VALGRIND_MAKE_MEM_DEFINED(mem, sz);
    return mem;
  }
}
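
/*
  Usage sketch (illustrative only, not compiled into the build): the
  n * elem_size multiplication above is not checked for overflow (the mcheck
  wrapper below notes the same), so callers allocating attacker-influenced
  counts may want a guard of their own. The helper name is hypothetical.
*/
#if 0
static void *example_calloc_checked(size_t n, size_t elem_size)
{
  /* reject products that would wrap around SIZE_MAX */
  if (elem_size && n > (size_t)-1 / elem_size)
    return NULL;
  return calloc(n, elem_size);
}
#endif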

/*

  cfree just calls free. It is needed/defined on some systems
  that pair it with calloc, presumably for odd historical reasons.

*/

#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
#if __STD_C
void cfree(Void_t *mem)
#else
void cfree(mem) Void_t *mem;
#endif
{
  fREe(mem);
}
#endif


#ifdef MCHECK_HEAP_PROTECTION
 #include "mcheck_core.inc.h"
 #if !__STD_C
  #error "must have __STD_C"
 #endif

Void_t *mALLOc(size_t bytes)
{
        mcheck_pedantic_prehook();
        size_t fullsz = mcheck_alloc_prehook(bytes);
        void *p = mALLOc_impl(fullsz);

        if (!p)
                return p;
        return mcheck_alloc_posthook(p, bytes);
}

void fREe(Void_t *mem) { fREe_impl(mcheck_free_prehook(mem)); }

Void_t *rEALLOc(Void_t *oldmem, size_t bytes)
{
        mcheck_pedantic_prehook();
        if (bytes == 0) {
                if (oldmem)
                        fREe(oldmem);
                return NULL;
        }

        if (oldmem == NULL)
                return mALLOc(bytes);

        void *p = mcheck_reallocfree_prehook(oldmem);
        size_t newsz = mcheck_alloc_prehook(bytes);

        p = rEALLOc_impl(p, newsz);
        if (!p)
                return p;
        return mcheck_alloc_noclean_posthook(p, bytes);
}

Void_t *mEMALIGn(size_t alignment, size_t bytes)
{
        mcheck_pedantic_prehook();
        size_t fullsz = mcheck_memalign_prehook(alignment, bytes);
        void *p = mEMALIGn_impl(alignment, fullsz);

        if (!p)
                return p;
        return mcheck_memalign_posthook(alignment, p, bytes);
}

// pvALLOc and vALLOc redirect to mEMALIGn, which is wrapped here, so they
// need no wrapping of their own.

Void_t *cALLOc(size_t n, size_t elem_size)
{
        mcheck_pedantic_prehook();
        // NB: there is no overflow check on n * elem_size here.
        size_t fullsz = mcheck_alloc_prehook(n * elem_size);
        void *p = cALLOc_impl(1, fullsz);

        if (!p)
                return p;
        return mcheck_alloc_noclean_posthook(p, n * elem_size);
}

// mcheck API {
int mcheck_pedantic(mcheck_abortfunc_t f)
{
        mcheck_initialize(f, 1);
        return 0;
}

int mcheck(mcheck_abortfunc_t f)
{
        mcheck_initialize(f, 0);
        return 0;
}

void mcheck_check_all(void) { mcheck_pedantic_check(); }

enum mcheck_status mprobe(void *__ptr) { return mcheck_mprobe(__ptr); }
// mcheck API }
#endif


/*

  Malloc_trim gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool. You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program. However, it cannot guarantee to reduce memory. Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less). Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.

*/

#if __STD_C
int malloc_trim(size_t pad)
#else
int malloc_trim(pad) size_t pad;
#endif
{
  long top_size;      /* Amount of top-most memory */
  long extra;         /* Amount to release */
  char* current_brk;  /* address returned by pre-check sbrk call */
  char* new_brk;      /* address returned by negative sbrk call */

  unsigned long pagesz = malloc_getpagesize;

  top_size = chunksize(top);
  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;

  if (extra < (long)pagesz)  /* Not enough memory to release */
    return 0;

  else
  {
    /* Test to make sure no one else called sbrk */
    current_brk = (char*)(MORECORE (0));
    if (current_brk != (char*)(top) + top_size)
      return 0;     /* Apparently we don't own memory; must fail */

    else
    {
      new_brk = (char*)(MORECORE (-extra));

      if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
      {
        /* Try to figure out what we have */
        current_brk = (char*)(MORECORE (0));
        top_size = current_brk - (char*)top;
        if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
        {
          sbrked_mem = current_brk - sbrk_base;
          set_head(top, top_size | PREV_INUSE);
        }
        check_chunk(top);
        return 0;
      }

      else
      {
        /* Success. Adjust top accordingly. */
        set_head(top, (top_size - extra) | PREV_INUSE);
        sbrked_mem -= extra;
        check_chunk(top);
        return 1;
      }
    }
  }
}
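
/*
  Usage sketch (illustrative only, not compiled into the build): after
  releasing large blocks, trimming with pad == 0 hands back as much of the
  top chunk as possible. Whether the freed block actually merges into top
  depends on the allocation pattern, as the comment above warns; the helper
  name is hypothetical.
*/
#if 0
static void example_use_malloc_trim(void)
{
  void *big = malloc(1 << 20);

  free(big);            /* large block may merge into the top chunk */
  if (malloc_trim(0))
    ; /* some pages were returned via a negative sbrk */
}
#endif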



/*
  malloc_usable_size:

  This routine tells you how many bytes you can actually use in an
  allocated chunk, which may be more than you requested (although
  often not). You can use this many bytes without worrying about
  overwriting other allocated objects. Not a particularly great
  programming practice, but still sometimes useful.

*/

#if __STD_C
size_t malloc_usable_size(Void_t* mem)
#else
size_t malloc_usable_size(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  if (mem == NULL)
    return 0;
  else
  {
    p = mem2chunk(mem);
    if(!chunk_is_mmapped(p))
    {
      if (!inuse(p)) return 0;
      check_inuse_chunk(p);
      return chunksize(p) - SIZE_SZ;
    }
    return chunksize(p) - 2*SIZE_SZ;
  }
}
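
/*
  Usage sketch (illustrative only, not compiled into the build): the usable
  size may exceed the requested size because of alignment and bookkeeping,
  so the extra bytes can be used safely, though relying on them is poor
  practice, as noted above. The helper name is hypothetical.
*/
#if 0
static void example_use_usable_size(void)
{
  char *p = malloc(10);

  if (p) {
    size_t real = malloc_usable_size(p);  /* >= 10 */

    memset(p, 0, real);  /* safe up to the reported size */
    free(p);
  }
}
#endif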



/* Utility to update current_mallinfo for malloc_stats and mallinfo() */

#ifdef DEBUG
static void malloc_update_mallinfo(void)
{
  int i;
  mbinptr b;
  mchunkptr p;
#ifdef DEBUG
  mchunkptr q;
#endif

  INTERNAL_SIZE_T avail = chunksize(top);
  int navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;

  for (i = 1; i < NAV; ++i)
  {
    b = bin_at(i);
    for (p = last(b); p != b; p = p->bk)
    {
#ifdef DEBUG
      check_free_chunk(p);
      for (q = next_chunk(p);
           q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
           q = next_chunk(q))
        check_inuse_chunk(q);
#endif
      avail += chunksize(p);
      navail++;
    }
  }

  current_mallinfo.ordblks = navail;
  current_mallinfo.uordblks = sbrked_mem - avail;
  current_mallinfo.fordblks = avail;
  current_mallinfo.hblks = n_mmaps;
  current_mallinfo.hblkhd = mmapped_mem;
  current_mallinfo.keepcost = chunksize(top);

}
#endif /* DEBUG */



/*

  malloc_stats:

  Prints the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), the maximum
  number of simultaneous mmap regions used, and the current number
  of bytes allocated via malloc (or realloc, etc) but not yet
  freed. (Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead.)

*/

#ifdef DEBUG
void malloc_stats(void)
{
  malloc_update_mallinfo();
  printf("max system bytes = %10u\n",
         (unsigned int)(max_total_mem));
  printf("system bytes     = %10u\n",
         (unsigned int)(sbrked_mem + mmapped_mem));
  printf("in use bytes     = %10u\n",
         (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
#if HAVE_MMAP
  printf("max mmap regions = %10u\n",
         (unsigned int)max_n_mmaps);
#endif
}
#endif /* DEBUG */

/*
  mallinfo returns a copy of updated current mallinfo.
*/

#ifdef DEBUG
struct mallinfo mALLINFo(void)
{
  malloc_update_mallinfo();
  return current_mallinfo;
}
#endif /* DEBUG */




/*
  mallopt:

  mallopt is the general SVID/XPG interface to tunable parameters.
  The format is to provide a (parameter-number, parameter-value) pair.
  mallopt then sets the corresponding parameter to the argument
  value if it can (i.e., so long as the value is meaningful),
  and returns 1 if successful else 0.

  See descriptions of tunable parameters above.

*/

#if __STD_C
int mALLOPt(int param_number, int value)
#else
int mALLOPt(param_number, value) int param_number; int value;
#endif
{
  switch(param_number)
  {
    case M_TRIM_THRESHOLD:
      trim_threshold = value; return 1;
    case M_TOP_PAD:
      top_pad = value; return 1;
    case M_MMAP_THRESHOLD:
      mmap_threshold = value; return 1;
    case M_MMAP_MAX:
#if HAVE_MMAP
      n_mmaps_max = value; return 1;
#else
      if (value != 0) return 0; else n_mmaps_max = value; return 1;
#endif

    default:
      return 0;
  }
}
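
/*
  Usage sketch (illustrative only, not compiled into the build): tuning the
  trim threshold through the SVID/XPG interface; mallopt() returns 1 on
  success and 0 for an unknown parameter or rejected value. The helper name
  and the 256 KiB figure are arbitrary choices for the example.
*/
#if 0
static void example_use_mallopt(void)
{
  /* keep up to 256 KiB of trailing free space before trimming */
  if (!mallopt(M_TRIM_THRESHOLD, 256 * 1024))
    ; /* parameter not accepted */
}
#endif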

int initf_malloc(void)
{
#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
        assert(gd->malloc_base);        /* Set up by crt0.S */
        gd->malloc_limit = CONFIG_VAL(SYS_MALLOC_F_LEN);
        gd->malloc_ptr = 0;
#endif

        return 0;
}

void malloc_enable_testing(int max_allocs)
{
        malloc_testing = true;
        malloc_max_allocs = max_allocs;
}

void malloc_disable_testing(void)
{
        malloc_testing = false;
}

/*

History:

    V2.6.6 Sun Dec 5 07:42:19 1999  Doug Lea  (dl at gee)
      * return null for negative arguments
      * Added Several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com>
        * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
          (e.g. WIN32 platforms)
        * Clean up header file inclusion for WIN32 platforms
        * Clean up code to avoid Microsoft Visual C++ compiler complaints
        * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
          memory allocation routines
        * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
        * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
          usage of 'assert' in non-WIN32 code
        * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
          avoid infinite loop
      * Always call 'fREe()' rather than 'free()'

    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
      * Fixed ordering problem with boundary-stamping

    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
      * Added pvalloc, as recommended by H.J. Liu
      * Added 64bit pointer support mainly from Wolfram Gloger
      * Added anonymously donated WIN32 sbrk emulation
      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
      * malloc_extend_top: fix mask error that caused wastage after
        foreign sbrks
      * Add linux mremap support code from HJ Liu

    V2.6.2 Tue Dec 5 06:52:55 1995  Doug Lea  (dl at gee)
      * Integrated most documentation with the code.
      * Add support for mmap, with help from
        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Use last_remainder in more cases.
      * Pack bins using idea from colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
      * Eliminate block-local decls to simplify tracing and debugging.
      * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
      * Rely on page size for units instead of SBRK_UNIT to
        avoid surprises about sbrk alignment conventions.
      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
        (raymond@es.ele.tue.nl) for the suggestion.
      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
      * More precautions for cases where other routines call sbrk,
        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Added macros etc., allowing use in linux libc from
        H.J. Lu (hjl@gnu.ai.mit.edu)
      * Inverted this history list

    V2.6.1 Sat Dec 2 14:10:57 1995  Doug Lea  (dl at gee)
      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
      * Removed all preallocation code since under current scheme
        the work required to undo bad preallocations exceeds
        the work saved in good cases for most test programs.
      * No longer use return list or unconsolidated bins since
        no scheme using them consistently outperforms those that don't
        given above changes.
      * Use best fit for very large chunks to prevent some worst-cases.
      * Added some support for debugging

    V2.6.0 Sat Nov 4 07:05:23 1995  Doug Lea  (dl at gee)
      * Removed footers when chunks are in use. Thanks to
        Paul Wilson (wilson@cs.texas.edu) for the suggestion.

    V2.5.4 Wed Nov 1 07:54:51 1995  Doug Lea  (dl at gee)
      * Added malloc_trim, with help from Wolfram Gloger
        (wmglo@Dent.MED.Uni-Muenchen.DE).

    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)

    V2.5.2 Tue Apr 5 16:20:40 1994  Doug Lea  (dl at g)
      * realloc: try to expand in both directions
      * malloc: swap order of clean-bin strategy;
      * realloc: only conditionally expand backwards
      * Try not to scavenge used bins
      * Use bin counts as a guide to preallocation
      * Occasionally bin return list chunks in first scan
      * Add a few optimizations from colin@nyx10.cs.du.edu

    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
      * faster bin computation & slightly different binning
      * merged all consolidations to one part of malloc proper
        (eliminating old malloc_find_space & malloc_clean_bin)
      * Scan 2 returns chunks (not just 1)
      * Propagate failure in realloc if malloc returns 0
      * Add stuff to allow compilation on non-ANSI compilers
        from kpv@research.att.com

    V2.5 Sat Aug 7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
      * removed potential for odd address access in prev_chunk
      * removed dependency on getpagesize.h
      * misc cosmetics and a bit more internal documentation
      * anticosmetics: mangled names in macros to evade debugger strangeness
      * tested on sparc, hp-700, dec-mips, rs6000
        with gcc & native cc (hp, dec only) allowing
        Detlefs & Zorn comparison study (in SIGPLAN Notices.)

    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
        structure of old version, but most details differ.)

*/