#include <common.h>
#include <log.h>

#if CONFIG_IS_ENABLED(UNIT_TEST)
#define DEBUG
#endif

#include <malloc.h>
#include <asm/io.h>

#ifdef DEBUG
#if __STD_C
static void malloc_update_mallinfo (void);
void malloc_stats (void);
#else
static void malloc_update_mallinfo ();
void malloc_stats();
#endif
#endif	/* DEBUG */

DECLARE_GLOBAL_DATA_PTR;

/*
  Emulation of sbrk for WIN32
  All code within the ifdef WIN32 is untested by me.

  Thanks to Martin Fong and others for supplying this.
*/


#ifdef WIN32

#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
~(malloc_getpagesize-1))
#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1))

/* reserve 64MB to ensure large contiguous space */
#define RESERVED_SIZE (1024*1024*64)
#define NEXT_SIZE (2048*1024)
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)

struct GmListElement;
typedef struct GmListElement GmListElement;

struct GmListElement
{
	GmListElement* next;
	void* base;
};

static GmListElement* head = 0;
static unsigned int gNextAddress = 0;
static unsigned int gAddressBase = 0;
static unsigned int gAllocatedSize = 0;

static
GmListElement* makeGmListElement (void* bas)
{
	GmListElement* this;
	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
	assert (this);
	if (this)
	{
		this->base = bas;
		this->next = head;
		head = this;
	}
	return this;
}

void gcleanup ()
{
	BOOL rval;
	assert ( (head == NULL) || (head->base == (void*)gAddressBase));
	if (gAddressBase && (gNextAddress - gAddressBase))
	{
		rval = VirtualFree ((void*)gAddressBase,
							gNextAddress - gAddressBase,
							MEM_DECOMMIT);
		assert (rval);
	}
	while (head)
	{
		GmListElement* next = head->next;
		rval = VirtualFree (head->base, 0, MEM_RELEASE);
		assert (rval);
		LocalFree (head);
		head = next;
	}
}

static
void* findRegion (void* start_address, unsigned long size)
{
	MEMORY_BASIC_INFORMATION info;
	if (size >= TOP_MEMORY) return NULL;

	while ((unsigned long)start_address + size < TOP_MEMORY)
	{
		VirtualQuery (start_address, &info, sizeof (info));
		if ((info.State == MEM_FREE) && (info.RegionSize >= size))
			return start_address;
		else
		{
			/* Requested region is not available so see if the */
			/* next region is available.  Set 'start_address' */
			/* to the next region and call 'VirtualQuery()' */
			/* again. */

			start_address = (char*)info.BaseAddress + info.RegionSize;

			/* Make sure we start looking for the next region */
			/* on the *next* 64K boundary.  Otherwise, even if */
			/* the new region is free according to */
			/* 'VirtualQuery()', the subsequent call to */
			/* 'VirtualAlloc()' (which follows the call to */
			/* this routine in 'wsbrk()') will round *down* */
			/* the requested address to a 64K boundary which */
			/* we already know is an address in the */
			/* unavailable region.  Thus, the subsequent call */
			/* to 'VirtualAlloc()' will fail and bring us back */
			/* here, causing us to go into an infinite loop. */

			start_address =
				(void *) AlignPage64K((unsigned long) start_address);
		}
	}
	return NULL;

}


void* wsbrk (long size)
{
	void* tmp;
	if (size > 0)
	{
		if (gAddressBase == 0)
		{
			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
			gNextAddress = gAddressBase =
				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
											MEM_RESERVE, PAGE_NOACCESS);
		} else if (AlignPage (gNextAddress + size) > (gAddressBase +
gAllocatedSize))
		{
			long new_size = max (NEXT_SIZE, AlignPage (size));
			void* new_address = (void*)(gAddressBase+gAllocatedSize);
			do
			{
				new_address = findRegion (new_address, new_size);

				if (!new_address)
					return (void*)-1;

				gAddressBase = gNextAddress =
					(unsigned int)VirtualAlloc (new_address, new_size,
												MEM_RESERVE, PAGE_NOACCESS);
				/* repeat in case of race condition */
				/* The region that we found has been snagged */
				/* by another thread */
			}
			while (gAddressBase == 0);

			assert (new_address == (void*)gAddressBase);

			gAllocatedSize = new_size;

			if (!makeGmListElement ((void*)gAddressBase))
				return (void*)-1;
		}
		if ((size + gNextAddress) > AlignPage (gNextAddress))
		{
			void* res;
			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
								(size + gNextAddress -
								 AlignPage (gNextAddress)),
								MEM_COMMIT, PAGE_READWRITE);
			if (!res)
				return (void*)-1;
		}
		tmp = (void*)gNextAddress;
		gNextAddress = (unsigned int)tmp + size;
		return tmp;
	}
	else if (size < 0)
	{
		unsigned int alignedGoal = AlignPage (gNextAddress + size);
		/* Trim by releasing the virtual memory */
		if (alignedGoal >= gAddressBase)
		{
			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
						 MEM_DECOMMIT);
			gNextAddress = gNextAddress + size;
			return (void*)gNextAddress;
		}
		else
		{
			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
						 MEM_DECOMMIT);
			gNextAddress = gAddressBase;
			return (void*)-1;
		}
	}
	else
	{
		return (void*)gNextAddress;
	}
}

#endif



/*
  Type declarations
*/


struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
} __attribute__((__may_alias__)) ;

typedef struct malloc_chunk* mchunkptr;

/*

   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of previous chunk, if allocated            | |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             User data starts here...                          .
	    .                                                               .
	    .             (malloc_usable_space() bytes)                     .
	    .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of chunk                                     |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

266 Chunks always begin on even word boundries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Size of previous chunk                            |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Forward pointer to next chunk in list             |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Back pointer to previous chunk in list            |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	    |             Unused space (may be 0 bytes long)                .
	    .                                                               .
	    .                                                               |

nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    (The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.)

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. (This makes it easier to
    deal with alignments etc).

    The two exceptions to all this are

     1. The special chunk `top', which doesn't bother using the
	trailing size field since there is no
	next contiguous chunk that would have to index off it. (After
	initialization, `top' is forced to always exist.  If it would
	become less than MINSIZE bytes long, it is replenished via
	malloc_extend_top.)

     2. Chunks allocated via mmap, which have the second-lowest-order
	bit (IS_MMAPPED) set in their size fields.  Because they are
	never merged or traversed from any other chunk, they have no
	foot size or inuse information.

    Available chunks are kept in any of several places (all declared below):

    * `av': An array of chunks serving as bin headers for consolidated
       chunks. Each bin is doubly linked.  The bins are approximately
       proportionally (log) spaced.  There are a lot of these bins
       (128). This may look excessive, but works very well in
       practice.  All procedures maintain the invariant that no
       consolidated chunk physically borders another one. Chunks in
       bins are kept in size order, with ties going to the
       approximately least recently used chunk.

       The chunks in each bin are maintained in decreasing sorted order by
       size.  This is irrelevant for the small bins, which all contain
       the same-sized chunks, but facilitates best-fit allocation for
       larger chunks. (These lists are just sequential. Keeping them in
       order almost never requires enough traversal to warrant using
       fancier ordered data structures.)  Chunks of the same size are
       linked with the most recently freed at the front, and allocations
       are taken from the back.  This results in LRU or FIFO allocation
       order, which tends to give each chunk an equal opportunity to be
       consolidated with adjacent freed chunks, resulting in larger free
       chunks and less fragmentation.

    * `top': The top-most available chunk (i.e., the one bordering the
       end of available memory) is treated specially. It is never
       included in any bin, is used only if no other chunk is
       available, and is released back to the system if it is very
       large (see M_TRIM_THRESHOLD).

    * `last_remainder': A bin holding only the remainder of the
       most recently split (non-top) chunk. This bin is checked
       before other non-fitting chunks, so as to provide better
       locality for runs of sequentially allocated chunks.

    *  Implicitly, through the host system's memory mapping tables.
       If supported, requests greater than a threshold are usually
       serviced via calls to mmap, and then later released via munmap.

*/

/*  sizes, alignments */

#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MINSIZE                (sizeof(struct malloc_chunk))

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* pad request bytes into a usable size */

#define request2size(req) \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))

/* Check if m has acceptable alignment */

#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)

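/*
  Worked example (illustrative only; assumes a 32-bit INTERNAL_SIZE_T,
  so SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and MINSIZE == 16):

    request2size(1)  == 16    (1 + 4 + 7 < MINSIZE + 7, so MINSIZE is used)
    request2size(13) == 24    ((13 + 4 + 7) & ~7)
    request2size(20) == 24    ((20 + 4 + 7) & ~7)

  i.e. add one size word of overhead, round up to a multiple of
  MALLOC_ALIGNMENT, and never return less than MINSIZE.
*/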


/*
  Physical chunk operations
*/


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */

#define IS_MMAPPED 0x2

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)


/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p)\
   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))


/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))

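/*
  Example (illustrative): for a chunk p with p->size == 0x41, i.e. a
  0x40-byte chunk with PREV_INUSE set, next_chunk(p) is (char*)p + 0x40.
  prev_chunk(p) must not be used in that state: with PREV_INUSE set,
  p->prev_size may hold user data of the preceding, still-allocated chunk.
*/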


/*
  Dealing with use bits
*/

/* extract p's inuse bit */

#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* extract inuse bit of previous chunk */

#define prev_inuse(p)  ((p)->size & PREV_INUSE)

/* check for mmap()'ed chunk */

#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))



/*
  Dealing with size fields
*/

/* Get size, ignoring use bits */

#define chunksize(p)          ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s)        ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))




/*
   Bins

    The bins, `av_' are an array of pairs of pointers serving as the
    heads of (initially empty) doubly-linked lists of chunks, laid out
    in a way so that each pair can be treated as if it were in a
    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
    and chunks are the same).

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically
    spaced. (See the table below.) The `av_' array is never mentioned
    directly in the code, but instead via bin access macros.

    Bin layout:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.

    The special chunks `top' and `last_remainder' get their own bins,
    (this is implemented via yet more trickery with the av_ array),
    although `top' is never properly linked to its bin since it is
    always handled specially.

*/

#define NAV             128   /* number of bins */

typedef struct malloc_chunk* mbinptr;

/* access macros */

#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))

/*
   The first 2 bins are never indexed. The corresponding av_ cells are instead
   used for bookkeeping. This is not to save space, but to simplify
   indexing, maintain locality, and avoid some initialization tests.
*/

#define top            (av_[2])          /* The topmost chunk */
#define last_remainder (bin_at(1))       /* remainder from last split */


/*
   Because top initially points to its own bin with initial
   zero size, thus forcing extension on the first malloc request,
   we avoid having any special code in malloc to check whether
   it even exists yet. But we still need to in malloc_extend_top.
*/

#define initial_top    ((mchunkptr)(bin_at(0)))

/* Helper macro to initialize bins */

#define IAV(i)  bin_at(i), bin_at(i)

static mbinptr av_[NAV * 2 + 2] = {
 NULL, NULL,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
};

#ifdef CONFIG_NEEDS_MANUAL_RELOC
static void malloc_bin_reloc(void)
{
	mbinptr *p = &av_[2];
	size_t i;

	for (i = 2; i < ARRAY_SIZE(av_); ++i, ++p)
		*p = (mbinptr)((ulong)*p + gd->reloc_off);
}
#else
static inline void malloc_bin_reloc(void) {}
#endif

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
static void malloc_init(void);
#endif

ulong mem_malloc_start = 0;
ulong mem_malloc_end = 0;
ulong mem_malloc_brk = 0;

void *sbrk(ptrdiff_t increment)
{
	ulong old = mem_malloc_brk;
	ulong new = old + increment;

	/*
	 * if we are giving memory back make sure we clear it out since
	 * we set MORECORE_CLEARS to 1
	 */
	if (increment < 0)
		memset((void *)new, 0, -increment);

	if ((new < mem_malloc_start) || (new > mem_malloc_end))
		return (void *)MORECORE_FAILURE;

	mem_malloc_brk = new;

	return (void *)old;
}
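
/*
 * Example (illustrative; the addresses are made up): after
 * mem_malloc_init(0x80000000, 0x100000) below, sbrk(0x1000) returns
 * 0x80000000 and moves mem_malloc_brk to 0x80001000, while a later
 * sbrk(-0x1000) zeroes that range again (MORECORE_CLEARS is 1) before
 * giving it back. Any move outside the arena returns MORECORE_FAILURE.
 */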

void mem_malloc_init(ulong start, ulong size)
{
	mem_malloc_start = start;
	mem_malloc_end = start + size;
	mem_malloc_brk = start;

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
	malloc_init();
#endif

	debug("using memory %#lx-%#lx for malloc()\n", mem_malloc_start,
	      mem_malloc_end);
#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
	memset((void *)mem_malloc_start, 0x0, size);
#endif
	malloc_bin_reloc();
}

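/*
 * Minimal usage sketch (illustrative only, kept under #if 0; the
 * function name and addresses are made up): the arena must be handed
 * over once, early, before the first full-malloc allocation.
 */
#if 0
static void example_malloc_setup(void)
{
	void *p;

	mem_malloc_init(0x80000000, 0x100000);	/* 1 MiB arena */

	p = malloc(64);		/* now served from that arena */
	free(p);
}
#endif
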
/* field-extraction macros */

#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/*
  Indexing into bins
*/

#define bin_index(sz)                                                          \
(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
 ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
					  126)
/*
  bins for chunks < 512 are all spaced 8 bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN         63
#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8

#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)

/*
   Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)

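/*
  Worked examples (illustrative):

    bin_index(48)    ==   6    (48 >> 9 == 0, so small bin 48 >> 3)
    bin_index(1000)  ==  71    (1000 >> 9 == 1 <= 4, so 56 + (1000 >> 6))
    bin_index(20000) == 114    (20000 >> 9 == 39 <= 84, so 110 + (20000 >> 12))

  is_small_request(48) is also true, so malloc can index the 48-byte
  bin directly, with no size search at all.
*/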


/*
   To help compensate for the large number of bins, a one-level index
   structure is used for bin-by-bin searching.  `binblocks' is a
   one-word bitvector recording whether groups of BINBLOCKWIDTH bins
   have any (possibly) non-empty bins, so they can be skipped over
   all at once during traversals. The bits are NOT always
   cleared as soon as all bins in a block are empty, but instead only
   when all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

#define binblocks_r     ((INTERNAL_SIZE_T)av_[1]) /* bitvector of nonempty blocks */
#define binblocks_w     (av_[1])

/* bin<->block macros */

#define idx2binblock(ix)    ((unsigned)1 << (ix / BINBLOCKWIDTH))
#define mark_binblock(ii)   (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii)))
#define clear_binblock(ii)  (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii))))

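/*
  Example (illustrative): with BINBLOCKWIDTH == 4, bins 36..39 share
  the summary bit idx2binblock(37) == (1 << 9), so malloc can skip all
  four bins at once whenever that bit is clear in binblocks.
*/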



/*  Other static bookkeeping data */

/* variables holding tunable values */

static unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
static unsigned long top_pad          = DEFAULT_TOP_PAD;
static unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
static unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;

/* The first value returned from sbrk */
static char* sbrk_base = (char*)(-1);

/* The maximum memory obtained from system via sbrk */
static unsigned long max_sbrked_mem = 0;

/* The maximum via either sbrk or mmap */
static unsigned long max_total_mem = 0;

/* internal working copy of mallinfo */
static struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/* The total memory obtained from system via sbrk */
#define sbrked_mem  (current_mallinfo.arena)

/* Tracking mmaps */

#ifdef DEBUG
static unsigned int n_mmaps = 0;
#endif	/* DEBUG */
static unsigned long mmapped_mem = 0;
#if HAVE_MMAP
static unsigned int max_n_mmaps = 0;
static unsigned long max_mmapped_mem = 0;
#endif

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
static void malloc_init(void)
{
	int i, j;

	debug("bins (av_ array) are at %p\n", (void *)av_);

	av_[0] = NULL; av_[1] = NULL;
	for (i = 2, j = 2; i < NAV * 2 + 2; i += 2, j++) {
		av_[i] = bin_at(j - 2);
		av_[i + 1] = bin_at(j - 2);

		/* Just print the first few bins so that
		 * we can see they are all right.
		 */
		if (i < 10)
			debug("av_[%d]=%lx av_[%d]=%lx\n",
			      i, (ulong)av_[i],
			      i + 1, (ulong)av_[i + 1]);
	}

	/* Init the static bookkeeping as well */
	sbrk_base = (char *)(-1);
	max_sbrked_mem = 0;
	max_total_mem = 0;
#ifdef DEBUG
	memset((void *)&current_mallinfo, 0, sizeof(struct mallinfo));
#endif
}
#endif

/*
  Debugging support
*/

#ifdef DEBUG


/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if __STD_C
static void do_check_chunk(mchunkptr p)
#else
static void do_check_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* No checkable chunk is mmapped */
  assert(!chunk_is_mmapped(p));

  /* Check for legal address ... */
  assert((char*)p >= sbrk_base);
  if (p != top)
    assert((char*)p + sz <= (char*)top);
  else
    assert((char*)p + sz <= sbrk_base + sbrked_mem);

}


#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  assert(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert (next == top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}

#if __STD_C
static void do_check_inuse_chunk(mchunkptr p)
#else
static void do_check_inuse_chunk(p) mchunkptr p;
#endif
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);

}

#if __STD_C
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = sz - s;

  do_check_inuse_chunk(p);

  /* Legal size ... */
  assert((long)sz >= (long)MINSIZE);
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert(room >= 0);
  assert(room < (long)MINSIZE);

  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));


  /* ... and was allocated at front of an available chunk */
  assert(prev_inuse(p));

}


#define check_free_chunk(P)  do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif



/*
  Macro-based internal utilities
*/


/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/


#define frontlink(P, S, IDX, BK, FD)                                          \
{                                                                             \
  if (S < MAX_SMALLBIN_SIZE)                                                  \
  {                                                                           \
    IDX = smallbin_index(S);                                                  \
    mark_binblock(IDX);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    IDX = bin_index(S);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    if (FD == BK) mark_binblock(IDX);                                         \
    else                                                                      \
    {                                                                         \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
      BK = FD->bk;                                                            \
    }                                                                         \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
}


/* take a chunk off a list */

#define unlink(P, BK, FD)                                                     \
{                                                                             \
  BK = P->bk;                                                                 \
  FD = P->fd;                                                                 \
  FD->bk = BK;                                                                \
  BK->fd = FD;                                                                \
}                                                                             \

/* Place p as the last remainder */

#define link_last_remainder(P)                                                \
{                                                                             \
  last_remainder->fd = last_remainder->bk = P;                                \
  P->fd = P->bk = last_remainder;                                             \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)



/* Routines dealing with mmap(). */

#if HAVE_MMAP

#if __STD_C
static mchunkptr mmap_chunk(size_t size)
#else
static mchunkptr mmap_chunk(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;

#ifndef MAP_ANONYMOUS
  static int fd = -1;
#endif

  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */

  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
  size = (size + SIZE_SZ + page_mask) & ~page_mask;

#ifdef MAP_ANONYMOUS
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#else /* !MAP_ANONYMOUS */
  if (fd < 0)
  {
    fd = open("/dev/zero", O_RDWR);
    if(fd < 0) return 0;
  }
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if(p == (mchunkptr)-1) return 0;

  n_mmaps++;
  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;

  /* We demand that eight bytes into a page must be 8-byte aligned. */
  assert(aligned_OK(chunk2mem(p)));

  /* The offset to the start of the mmapped region is stored
   * in the prev_size field of the chunk; normally it is zero,
   * but that can be changed in memalign().
   */
  p->prev_size = 0;
  set_head(p, size|IS_MMAPPED);

  mmapped_mem += size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

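/*
 * Example (illustrative): with 4096-byte pages and SIZE_SZ == 4, a
 * request for 5000 bytes maps (5000 + 4 + 4095) & ~4095 == 8192 bytes,
 * i.e. two pages; the extra SIZE_SZ covers the trailing prev_size
 * field that mmapped chunks lack, as noted above.
 */
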
#if __STD_C
static void munmap_chunk(mchunkptr p)
#else
static void munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);
  int ret;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);

  n_mmaps--;
  mmapped_mem -= (size + p->prev_size);

  ret = munmap((char *)p - p->prev_size, size + p->prev_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}

#if HAVE_MREMAP

#if __STD_C
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
#else
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((size + offset) & (malloc_getpagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;

  cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);

  if (cp == (char *)-1) return 0;

  p = (mchunkptr)(cp + offset);

  assert(aligned_OK(chunk2mem(p)));

  assert((p->prev_size == offset));
  set_head(p, (new_size - offset)|IS_MMAPPED);

  mmapped_mem -= size + offset;
  mmapped_mem += new_size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#endif /* HAVE_MREMAP */

#endif /* HAVE_MMAP */

/*
  Extend the top-most chunk by obtaining memory from system.
  Main interface to sbrk (but see also malloc_trim).
*/

#if __STD_C
static void malloc_extend_top(INTERNAL_SIZE_T nb)
#else
static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
#endif
{
  char*     brk;                /* return value from sbrk */
  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
  INTERNAL_SIZE_T correction;   /* bytes for 2nd sbrk call */
  char*     new_brk;            /* return of 2nd sbrk call */
  INTERNAL_SIZE_T top_size;     /* new size of top chunk */

  mchunkptr old_top = top;      /* Record state of old top */
  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
  char*     old_end = (char*)(chunk_at_offset(old_top, old_top_size));

  /* Pad request with top_pad plus minimal overhead */

  INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;
  unsigned long pagesz = malloc_getpagesize;

  /* If not the first time through, round to preserve page boundary */
  /* Otherwise, we need to correct to a page size below anyway. */
  /* (We also correct below if an intervening foreign sbrk call.) */

  if (sbrk_base != (char*)(-1))
    sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);

  brk = (char*)(MORECORE (sbrk_size));

  /* Fail if sbrk failed or if a foreign sbrk call killed our space */
  if (brk == (char*)(MORECORE_FAILURE) ||
      (brk < old_end && old_top != initial_top))
    return;

  sbrked_mem += sbrk_size;

  if (brk == old_end) /* can just add bytes to current top */
  {
    top_size = sbrk_size + old_top_size;
    set_head(top, top_size | PREV_INUSE);
  }
  else
  {
    if (sbrk_base == (char*)(-1))  /* First time through. Record base */
      sbrk_base = brk;
    else  /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
      sbrked_mem += brk - (char*)old_end;

    /* Guarantee alignment of first new chunk made from this space */
    front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
    if (front_misalign > 0)
    {
      correction = (MALLOC_ALIGNMENT) - front_misalign;
      brk += correction;
    }
    else
      correction = 0;

    /* Guarantee the next brk will be at a page boundary */

    correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
		   ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));

    /* Allocate correction */
    new_brk = (char*)(MORECORE (correction));
    if (new_brk == (char*)(MORECORE_FAILURE)) return;

    sbrked_mem += correction;

    top = (mchunkptr)brk;
    top_size = new_brk - brk + correction;
    set_head(top, top_size | PREV_INUSE);

    if (old_top != initial_top)
    {

      /* There must have been an intervening foreign sbrk call. */
      /* A double fencepost is necessary to prevent consolidation */

      /* If not enough space to do this, then user did something very wrong */
      if (old_top_size < MINSIZE)
      {
	set_head(top, PREV_INUSE); /* will force null return from malloc */
	return;
      }

      /* Also keep size a multiple of MALLOC_ALIGNMENT */
      old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
      set_head_size(old_top, old_top_size);
      chunk_at_offset(old_top, old_top_size          )->size =
	SIZE_SZ|PREV_INUSE;
      chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
	SIZE_SZ|PREV_INUSE;
      /* If possible, release the rest. */
      if (old_top_size >= MINSIZE)
	fREe(chunk2mem(old_top));
    }
  }

  if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
    max_sbrked_mem = sbrked_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;

  /* We always land on a page boundary */
  assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
}




/* Main public routines */


/*
1214 Malloc Algorthim:

    The requested size is first converted into a usable form, `nb'.
    This currently means to add 4 bytes overhead plus possibly more to
    obtain 8-byte alignment and/or to obtain a size of at least
    MINSIZE (currently 16 bytes), the smallest allocatable size.
    (All fits are considered `exact' if they are within MINSIZE bytes.)

    From there, the first of the following steps that succeeds is taken:

      1. The bin corresponding to the request size is scanned, and if
	 a chunk of exactly the right size is found, it is taken.

      2. The most recently remaindered chunk is used if it is big
	 enough.  This is a form of (roving) first fit, used only in
	 the absence of exact fits. Runs of consecutive requests use
	 the remainder of the chunk used for the previous such request
	 whenever possible. This limited use of a first-fit style
	 allocation strategy tends to give contiguous chunks
	 coextensive lifetimes, which improves locality and can reduce
	 fragmentation in the long run.

      3. Other bins are scanned in increasing size order, using a
	 chunk big enough to fulfill the request, and splitting off
	 any remainder.  This search is strictly by best-fit; i.e.,
	 the smallest (with ties going to approximately the least
	 recently used) chunk that fits is selected.

      4. If large enough, the chunk bordering the end of memory
	 (`top') is split off. (This use of `top' is in accord with
	 the best-fit search rule.  In effect, `top' is treated as
	 larger (and thus less well fitting) than any other available
	 chunk since it can be extended to be as large as necessary
	 (up to system limitations).

      5. If the request size meets the mmap threshold and the
	 system supports mmap, and there are few enough currently
	 allocated mmapped regions, and a call to mmap succeeds,
	 the request is allocated via direct memory mapping.

      6. Otherwise, the top of memory is extended by
	 obtaining more space from the system (normally using sbrk,
	 but definable to anything else via the MORECORE macro).
	 Memory is gathered from the system (in system page-sized
	 units) in a way that allows chunks obtained across different
	 sbrk calls to be consolidated, but does not require
	 contiguous memory. Thus, it should be safe to intersperse
	 mallocs with other sbrk calls.

    All allocations are made from the `lowest' part of any found
    chunk.  (The implementation invariant is that prev_inuse is
    always true of any allocated chunk; i.e., that each allocated
    chunk borders either a previously allocated and still in-use chunk,
    or the base of its memory arena.)

*/

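/*
  Worked trace of the steps above (illustrative; 32-bit sizes):
  mALLOc(100) pads the request to nb == request2size(100) == 104, then
  (1) checks the 104-byte small bin and its neighbour, (2) tries
  last_remainder, (3) best-fit scans the larger bins via binblocks,
  and finally falls back to splitting `top', extending it through
  malloc_extend_top (or mmap, where available) if it is too small.
*/
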
#if __STD_C
Void_t* mALLOc(size_t bytes)
#else
Void_t* mALLOc(bytes) size_t bytes;
#endif
{
  mchunkptr victim;                  /* inspected/selected chunk */
  INTERNAL_SIZE_T victim_size;       /* its size */
  int       idx;                     /* index for bin traversal */
  mbinptr   bin;                     /* associated bin */
  mchunkptr remainder;               /* remainder from a split */
  long      remainder_size;          /* its size */
  int       remainder_index;         /* its bin index */
  unsigned long block;               /* block traverser bit */
  int       startidx;                /* first bin of a traversed block */
  mchunkptr fwd;                     /* misc temp for linking */
  mchunkptr bck;                     /* misc temp for linking */
  mbinptr q;                         /* misc temp */

  INTERNAL_SIZE_T nb;

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
		return malloc_simple(bytes);
#endif

  /* check if mem_malloc_init() was run */
  if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) {
    /* not initialized yet */
    return NULL;
  }

  if ((long)bytes < 0) return NULL;

  nb = request2size(bytes);  /* padded request size; */

  /* Check for exact match in a bin */

  if (is_small_request(nb))  /* Faster version for small requests */
  {
    idx = smallbin_index(nb);

    /* No traversal or size check necessary for small bins.  */

    q = bin_at(idx);
    victim = last(q);

    /* Also scan the next one, since it would have a remainder < MINSIZE */
    if (victim == q)
    {
      q = next_bin(q);
      victim = last(q);
    }
    if (victim != q)
    {
      victim_size = chunksize(victim);
      unlink(victim, bck, fwd);
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }

    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */

  }
  else
  {
    idx = bin_index(nb);
    bin = bin_at(idx);

    for (victim = last(bin); victim != bin; victim = victim->bk)
    {
      victim_size = chunksize(victim);
      remainder_size = victim_size - nb;

      if (remainder_size >= (long)MINSIZE) /* too big */
      {
	--idx; /* adjust to rescan below after checking last remainder */
	break;
      }

      else if (remainder_size >= 0) /* exact fit */
      {
	unlink(victim, bck, fwd);
	set_inuse_bit_at_offset(victim, victim_size);
	check_malloced_chunk(victim, nb);
	return chunk2mem(victim);
      }
    }

    ++idx;

  }

  /* Try to use the last split-off remainder */

  if ( (victim = last_remainder->fd) != last_remainder)
  {
    victim_size = chunksize(victim);
    remainder_size = victim_size - nb;

    if (remainder_size >= (long)MINSIZE) /* re-split */
    {
      remainder = chunk_at_offset(victim, nb);
      set_head(victim, nb | PREV_INUSE);
      link_last_remainder(remainder);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_foot(remainder, remainder_size);
      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }

    clear_last_remainder;

    if (remainder_size >= 0)  /* exhaust */
    {
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }

    /* Else place in bin */

    frontlink(victim, victim_size, remainder_index, bck, fwd);
  }

  /*
     If there are any possibly nonempty big-enough blocks,
     search for best fitting chunk by scanning bins in blockwidth units.
  */

  if ( (block = idx2binblock(idx)) <= binblocks_r)
  {

    /* Get to the first marked block */

    if ( (block & binblocks_r) == 0)
    {
      /* force to an even block boundary */
      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
      block <<= 1;
      while ((block & binblocks_r) == 0)
      {
	idx += BINBLOCKWIDTH;
	block <<= 1;
      }
    }

    /* For each possibly nonempty block ... */
    for (;;)
    {
      startidx = idx;          /* (track incomplete blocks) */
      q = bin = bin_at(idx);

      /* For each bin in this block ... */
      do
      {
	/* Find and use first big enough chunk ... */

	for (victim = last(bin); victim != bin; victim = victim->bk)
	{
	  victim_size = chunksize(victim);
	  remainder_size = victim_size - nb;

	  if (remainder_size >= (long)MINSIZE) /* split */
	  {
	    remainder = chunk_at_offset(victim, nb);
	    set_head(victim, nb | PREV_INUSE);
	    unlink(victim, bck, fwd);
	    link_last_remainder(remainder);
	    set_head(remainder, remainder_size | PREV_INUSE);
	    set_foot(remainder, remainder_size);
	    check_malloced_chunk(victim, nb);
	    return chunk2mem(victim);
	  }

	  else if (remainder_size >= 0)  /* take */
	  {
	    set_inuse_bit_at_offset(victim, victim_size);
	    unlink(victim, bck, fwd);
	    check_malloced_chunk(victim, nb);
	    return chunk2mem(victim);
	  }

	}

	bin = next_bin(bin);

      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);

      /* Clear out the block bit. */

      do   /* Possibly backtrack to try to clear a partial block */
      {
	if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
	{
	  av_[1] = (mbinptr)(binblocks_r & ~block);
	  break;
	}
	--startidx;
	q = prev_bin(q);
      } while (first(q) == q);

      /* Get to the next possibly nonempty block */

      if ( (block <<= 1) <= binblocks_r && (block != 0) )
      {
	while ((block & binblocks_r) == 0)
	{
	  idx += BINBLOCKWIDTH;
	  block <<= 1;
	}
      }
      else
	break;
    }
  }


  /* Try to use top chunk */

  /* Require that there be a remainder, ensuring top always exists */
  if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
  {

#if HAVE_MMAP
    /* If big and would otherwise need to extend, try to use mmap instead */
    if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
	(victim = mmap_chunk(nb)))
      return chunk2mem(victim);
#endif

    /* Try to extend */
    malloc_extend_top(nb);
    if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
      return NULL; /* propagate failure */
  }

  victim = top;
  set_head(victim, nb | PREV_INUSE);
  top = chunk_at_offset(victim, nb);
  set_head(top, remainder_size | PREV_INUSE);
  check_malloced_chunk(victim, nb);
  return chunk2mem(victim);

}




/*

  free() algorithm :

    cases:

       1. free(0) has no effect.

       2. If the chunk was allocated via mmap, it is released via munmap().

       3. If a returned chunk borders the current high end of memory,
	  it is consolidated into the top, and if the total unused
	  topmost memory exceeds the trim threshold, malloc_trim is
	  called.

       4. Other chunks are consolidated as they arrive, and
	  placed in corresponding bins. (This includes the case of
	  consolidating with the current `last_remainder').

*/

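/*
  Worked example of case 4 (illustrative): let chunks A, B and C be
  physically adjacent, with A free and C in use. fREe() on B's mem
  pointer sees PREV_INUSE clear in B's header, merges backward into A
  (unlinking A from its bin), leaves C alone because C is in use, and
  frontlinks the combined A+B chunk into the bin for its new size.
  Had A+B bordered `top' instead, case 3 would fold it into `top'.
*/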
1543
1544#if __STD_C
1545void fREe(Void_t* mem)
1546#else
1547void fREe(mem) Void_t* mem;
1548#endif
1549{
1550 mchunkptr p; /* chunk corresponding to mem */
1551 INTERNAL_SIZE_T hd; /* its head field */
1552 INTERNAL_SIZE_T sz; /* its size */
1553 int idx; /* its bin index */
1554 mchunkptr next; /* next contiguous chunk */
1555 INTERNAL_SIZE_T nextsz; /* its size */
1556 INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
1557 mchunkptr bck; /* misc temp for linking */
1558 mchunkptr fwd; /* misc temp for linking */
1559 int islr; /* track whether merging with last_remainder */
1560
Andy Yan1fa20e4d2017-07-24 17:43:34 +08001561#if CONFIG_VAL(SYS_MALLOC_F_LEN)
Simon Glass863e4042014-07-10 22:23:28 -06001562 /* free() is a no-op - all the memory will be freed on relocation */
Simon Glass94890462014-11-10 17:16:43 -07001563 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
Simon Glass863e4042014-07-10 22:23:28 -06001564 return;
1565#endif
1566
Kim Phillipsb052b602012-10-29 13:34:32 +00001567 if (mem == NULL) /* free(0) has no effect */
wdenk217c9da2002-10-25 20:35:49 +00001568 return;
1569
1570 p = mem2chunk(mem);
1571 hd = p->size;
1572
1573#if HAVE_MMAP
1574 if (hd & IS_MMAPPED) /* release mmapped memory. */
1575 {
1576 munmap_chunk(p);
1577 return;
1578 }
1579#endif
1580
1581 check_inuse_chunk(p);
1582
1583 sz = hd & ~PREV_INUSE;
1584 next = chunk_at_offset(p, sz);
1585 nextsz = chunksize(next);
1586
1587 if (next == top) /* merge with top */
1588 {
1589 sz += nextsz;
1590
1591 if (!(hd & PREV_INUSE)) /* consolidate backward */
1592 {
1593 prevsz = p->prev_size;
1594 p = chunk_at_offset(p, -((long) prevsz));
1595 sz += prevsz;
1596 unlink(p, bck, fwd);
1597 }
1598
1599 set_head(p, sz | PREV_INUSE);
1600 top = p;
1601 if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
1602 malloc_trim(top_pad);
1603 return;
1604 }
1605
1606 set_head(next, nextsz); /* clear inuse bit */
1607
1608 islr = 0;
1609
1610 if (!(hd & PREV_INUSE)) /* consolidate backward */
1611 {
1612 prevsz = p->prev_size;
1613 p = chunk_at_offset(p, -((long) prevsz));
1614 sz += prevsz;
1615
1616 if (p->fd == last_remainder) /* keep as last_remainder */
1617 islr = 1;
1618 else
1619 unlink(p, bck, fwd);
1620 }
1621
1622 if (!(inuse_bit_at_offset(next, nextsz))) /* consolidate forward */
1623 {
1624 sz += nextsz;
1625
1626 if (!islr && next->fd == last_remainder) /* re-insert last_remainder */
1627 {
1628 islr = 1;
1629 link_last_remainder(p);
1630 }
1631 else
1632 unlink(next, bck, fwd);
1633 }
1634
1635
1636 set_head(p, sz | PREV_INUSE);
1637 set_foot(p, sz);
1638 if (!islr)
1639 frontlink(p, sz, idx, bck, fwd);
1640}
1641
1642
wdenk217c9da2002-10-25 20:35:49 +00001643
1644
Simon Glass7471cc72014-07-10 22:23:25 -06001645
wdenk217c9da2002-10-25 20:35:49 +00001646/*
1647
1648 Realloc algorithm:
1649
1650 Chunks that were obtained via mmap cannot be extended or shrunk
1651 unless HAVE_MREMAP is defined, in which case mremap is used.
1652 Otherwise, if their reallocation is for additional space, they are
1653 copied. If for less, they are just left alone.
1654
1655 Otherwise, if the reallocation is for additional space, and the
1656 chunk can be extended, it is, else a malloc-copy-free sequence is
1657 taken. There are several different ways that a chunk could be
1658 extended. All are tried:
1659
1660 * Extending forward into following adjacent free chunk.
1661 * Shifting backwards, joining preceding adjacent space
1662 * Both shifting backwards and extending forward.
1663 * Extending into newly sbrked space
1664
1665 Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
1666 size argument of zero (re)allocates a minimum-sized chunk.
1667
1668 If the reallocation is for less space, and the new request is for
1669 a `small' (<512 bytes) size, then the newly unused space is lopped
1670 off and freed.
1671
1672 The old unix realloc convention of allowing the last-free'd chunk
1673 to be used as an argument to realloc is no longer supported.
1674 I don't know of any programs still relying on this feature,
1675 and allowing it would also allow too many other incorrect
1676 usages of realloc to be sensible.
1677
1678
1679*/
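
/*
  A minimal usage sketch (illustrative only): growing a buffer with
  rEALLOc and coping with failure without leaking the old block.

      char *buf = mALLOc(64);
      char *tmp = rEALLOc(buf, 256);   // may expand in place or move
      if (tmp == NULL)
          fREe(buf);                   // old block is still valid on failure
      else
          buf = tmp;                   // old pointer must not be used again
*/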


#if __STD_C
Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
#else
Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
#endif
{
  INTERNAL_SIZE_T    nb;      /* padded request size */

  mchunkptr oldp;             /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T    oldsize; /* its size */

  mchunkptr newp;             /* chunk to return */
  INTERNAL_SIZE_T    newsize; /* its size */
  Void_t*   newmem;           /* corresponding user mem */

  mchunkptr next;             /* next contiguous chunk after oldp */
  INTERNAL_SIZE_T  nextsize;  /* its size */

  mchunkptr prev;             /* previous contiguous chunk before oldp */
  INTERNAL_SIZE_T  prevsize;  /* its size */

  mchunkptr remainder;        /* holds split off extra space from newp */
  INTERNAL_SIZE_T  remainder_size;  /* its size */

  mchunkptr bck;              /* misc temp for linking */
  mchunkptr fwd;              /* misc temp for linking */

#ifdef REALLOC_ZERO_BYTES_FREES
  if (!bytes) {
      fREe(oldmem);
      return NULL;
  }
#endif

  if ((long)bytes < 0) return NULL;

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == NULL) return mALLOc(bytes);

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
    if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
        /* This is harder to support and should not be needed */
        panic("pre-reloc realloc() is not supported");
    }
#endif

  newp    = oldp    = mem2chunk(oldmem);
  newsize = oldsize = chunksize(oldp);

  nb = request2size(bytes);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp))
  {
#if HAVE_MREMAP
    newp = mremap_chunk(oldp, nb);
    if(newp) return chunk2mem(newp);
#endif
    /* Note the extra SIZE_SZ overhead. */
    if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
    /* Must alloc, copy, free. */
    newmem = mALLOc(bytes);
    if (!newmem)
        return NULL; /* propagate failure */
    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
    munmap_chunk(oldp);
    return newmem;
  }
#endif

  check_inuse_chunk(oldp);

  if ((long)(oldsize) < (long)(nb))
  {

    /* Try expanding forward */

    next = chunk_at_offset(oldp, oldsize);
    if (next == top || !inuse(next))
    {
      nextsize = chunksize(next);

      /* Forward into top only if a remainder */
      if (next == top)
      {
        if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
        {
          newsize += nextsize;
          top = chunk_at_offset(oldp, nb);
          set_head(top, (newsize - nb) | PREV_INUSE);
          set_head_size(oldp, nb);
          return chunk2mem(oldp);
        }
      }

      /* Forward into next chunk */
      else if (((long)(nextsize + newsize) >= (long)(nb)))
      {
        unlink(next, bck, fwd);
        newsize += nextsize;
        goto split;
      }
    }
    else
    {
      next = NULL;
      nextsize = 0;
    }

    /* Try shifting backwards. */

    if (!prev_inuse(oldp))
    {
      prev = prev_chunk(oldp);
      prevsize = chunksize(prev);

      /* try forward + backward first to save a later consolidation */

      if (next != NULL)
      {
        /* into top */
        if (next == top)
        {
          if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
          {
            unlink(prev, bck, fwd);
            newp = prev;
            newsize += prevsize + nextsize;
            newmem = chunk2mem(newp);
            MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
            top = chunk_at_offset(newp, nb);
            set_head(top, (newsize - nb) | PREV_INUSE);
            set_head_size(newp, nb);
            return newmem;
          }
        }

        /* into next chunk */
        else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
        {
          unlink(next, bck, fwd);
          unlink(prev, bck, fwd);
          newp = prev;
          newsize += nextsize + prevsize;
          newmem = chunk2mem(newp);
          MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
          goto split;
        }
      }

      /* backward only */
      if (prev != NULL && (long)(prevsize + newsize) >= (long)nb)
      {
        unlink(prev, bck, fwd);
        newp = prev;
        newsize += prevsize;
        newmem = chunk2mem(newp);
        MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
        goto split;
      }
    }

    /* Must allocate */

    newmem = mALLOc (bytes);

    if (newmem == NULL)  /* propagate failure */
      return NULL;

    /* Avoid copy if newp is next chunk after oldp. */
    /* (This can only happen when new chunk is sbrk'ed.) */

    if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
    {
      newsize += chunksize(newp);
      newp = oldp;
      goto split;
    }

    /* Otherwise copy, free, and exit */
    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
    fREe(oldmem);
    return newmem;
  }


 split:  /* split off extra room in old or expanded chunk */

  if (newsize - nb >= MINSIZE) /* split off remainder */
  {
    remainder = chunk_at_offset(newp, nb);
    remainder_size = newsize - nb;
    set_head_size(newp, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_inuse_bit_at_offset(remainder, remainder_size);
    fREe(chunk2mem(remainder)); /* let free() deal with it */
  }
  else
  {
    set_head_size(newp, newsize);
    set_inuse_bit_at_offset(newp, newsize);
  }

  check_inuse_chunk(newp);
  return chunk2mem(newp);
}



/*

  memalign algorithm:

    memalign requests more than enough space from malloc, finds a spot
    within that chunk that meets the alignment request, and then
    possibly frees the leading and trailing space.

    The alignment argument must be a power of two. This property is not
    checked by memalign, so misuse may result in random runtime errors.

    8-byte alignment is guaranteed by normal malloc calls, so don't
    bother calling memalign with an argument of 8 or less.

    Overreliance on memalign is a sure way to fragment space.

*/
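
/*
  A minimal usage sketch (illustrative only): requesting a 64-byte-aligned
  buffer, e.g. for a descriptor that must sit on a cache-line boundary.
  The alignment must be a power of two; the block is released with fREe().

      void *desc = mEMALIGn(64, 512);
      if (desc != NULL) {
          assert(((unsigned long)desc % 64) == 0);
          // ... use the buffer ...
          fREe(desc);
      }
*/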


#if __STD_C
Void_t* mEMALIGn(size_t alignment, size_t bytes)
#else
Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
#endif
{
  INTERNAL_SIZE_T    nb;      /* padded request size */
  char*     m;                /* memory returned by malloc call */
  mchunkptr p;                /* corresponding chunk */
  char*     brk;              /* alignment point within p */
  mchunkptr newp;             /* chunk to return */
  INTERNAL_SIZE_T  newsize;   /* its size */
  INTERNAL_SIZE_T  leadsize;  /* leading space before alignment point */
  mchunkptr remainder;        /* spare room at end to split off */
  long      remainder_size;   /* its size */

  if ((long)bytes < 0) return NULL;

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
    if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
        return memalign_simple(alignment, bytes);
    }
#endif

  /* If need less alignment than we give anyway, just relay to malloc */

  if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */

  if (alignment < MINSIZE) alignment = MINSIZE;

  /* Call malloc with worst case padding to hit alignment. */

  nb = request2size(bytes);
  m  = (char*)(mALLOc(nb + alignment + MINSIZE));

  /*
   * The attempt to over-allocate (with a size large enough to guarantee the
   * ability to find an aligned region within allocated memory) failed.
   *
   * Try again, this time only allocating exactly the size the user wants. If
   * the allocation now succeeds and just happens to be aligned, we can still
   * fulfill the user's request.
   */
  if (m == NULL) {
    size_t extra, extra2;
    /*
     * Use bytes not nb, since mALLOc internally calls request2size too, and
     * each call increases the size to allocate, to account for the header.
     */
    m = (char*)(mALLOc(bytes));
    /* Aligned -> return it */
    if ((((unsigned long)(m)) % alignment) == 0)
      return m;
    /*
     * Otherwise, try again, requesting enough extra space to be able to
     * acquire alignment.
     */
    fREe(m);
    /* Add in extra bytes to match misalignment of unexpanded allocation */
    extra = alignment - (((unsigned long)(m)) % alignment);
    m = (char*)(mALLOc(bytes + extra));
    /*
     * m might not be the same as before. Validate that the previous value
     * of extra still works for the current value of m. If m were NULL,
     * extra2 would equal alignment, but the comparison is guarded by the
     * if (m) test, and the NULL case falls through to the check below.
     */
    if (m) {
      extra2 = alignment - (((unsigned long)(m)) % alignment);
      if (extra2 > extra) {
        fREe(m);
        m = NULL;
      }
    }
    /* Fall through to original NULL check and chunk splitting logic */
  }

  if (m == NULL) return NULL; /* propagate failure */

  p = mem2chunk(m);

  if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
  {
#if HAVE_MMAP
    if(chunk_is_mmapped(p))
      return chunk2mem(p); /* nothing more to do */
#endif
  }
  else /* misaligned */
  {
    /*
      Find an aligned spot inside chunk.
      Since we need to give back leading space in a chunk of at
      least MINSIZE, if the first calculation places us at
      a spot with less than MINSIZE leader, we can move to the
      next aligned spot -- we've allocated enough total room so that
      this is always possible.
    */

    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment));
    if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;

    newp = (mchunkptr)brk;
    leadsize = brk - (char*)(p);
    newsize = chunksize(p) - leadsize;

#if HAVE_MMAP
    if(chunk_is_mmapped(p))
    {
      newp->prev_size = p->prev_size + leadsize;
      set_head(newp, newsize|IS_MMAPPED);
      return chunk2mem(newp);
    }
#endif

    /* give back leader, use the rest */

    set_head(newp, newsize | PREV_INUSE);
    set_inuse_bit_at_offset(newp, newsize);
    set_head_size(p, leadsize);
    fREe(chunk2mem(p));
    p = newp;

    assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
  }

  /* Also give back spare room at the end */

  remainder_size = chunksize(p) - nb;

  if (remainder_size >= (long)MINSIZE)
  {
    remainder = chunk_at_offset(p, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_head_size(p, nb);
    fREe(chunk2mem(remainder));
  }

  check_inuse_chunk(p);
  return chunk2mem(p);

}



/*
  valloc just invokes memalign with alignment argument equal
  to the page size of the system (or as near to this as can
  be figured out from all the includes/defines above.)
*/

#if __STD_C
Void_t* vALLOc(size_t bytes)
#else
Void_t* vALLOc(bytes) size_t bytes;
#endif
{
  return mEMALIGn (malloc_getpagesize, bytes);
}

/*
  pvalloc just invokes valloc for the nearest pagesize
  that will accommodate the request
*/

#if __STD_C
Void_t* pvALLOc(size_t bytes)
#else
Void_t* pvALLOc(bytes) size_t bytes;
#endif
{
  size_t pagesize = malloc_getpagesize;
  return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
}
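
/*
  For example (illustrative only), with a 4096-byte page size the
  rounding expression above maps a request of 1 byte to one page and a
  request of one page plus one byte to two pages:

      (1    + 4095) & ~4095  ==  4096
      (4097 + 4095) & ~4095  ==  8192
*/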

/*

  calloc calls malloc, then zeroes out the allocated chunk.

*/

#if __STD_C
Void_t* cALLOc(size_t n, size_t elem_size)
#else
Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T csz;

  INTERNAL_SIZE_T sz = n * elem_size;
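  /*
   * Note: the multiplication above is not checked for overflow; with a
   * huge n, sz can wrap to a small value, so callers must ensure that
   * n * elem_size fits in INTERNAL_SIZE_T. Note also that the
   * (long)n < 0 check below runs only after mALLOc(sz) has been called,
   * so a block allocated for a wrapped sz is leaked when NULL is returned.
   */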

  /* check if expand_top called, in which case don't need to clear */
#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
#if MORECORE_CLEARS
  mchunkptr oldtop = top;
  INTERNAL_SIZE_T oldtopsize = chunksize(top);
#endif
#endif
  Void_t* mem = mALLOc (sz);

  if ((long)n < 0) return NULL;

  if (mem == NULL)
    return NULL;
  else
  {
#if CONFIG_VAL(SYS_MALLOC_F_LEN)
    if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
        memset(mem, 0, sz);
        return mem;
    }
#endif
    p = mem2chunk(mem);

    /* Two optional cases in which clearing not necessary */

#if HAVE_MMAP
    if (chunk_is_mmapped(p)) return mem;
#endif

    csz = chunksize(p);

#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
#if MORECORE_CLEARS
    if (p == oldtop && csz > oldtopsize)
    {
      /* clear only the bytes from non-freshly-sbrked memory */
      csz = oldtopsize;
    }
#endif
#endif

    MALLOC_ZERO(mem, csz - SIZE_SZ);
    return mem;
  }
}

/*

  cfree just calls free. It is needed/defined on some systems
  that pair it with calloc, presumably for odd historical reasons.

*/

#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
#if __STD_C
void cfree(Void_t *mem)
#else
void cfree(mem) Void_t *mem;
#endif
{
  fREe(mem);
}
#endif



/*

    Malloc_trim gives memory back to the system (via negative
    arguments to sbrk) if there is unused memory at the `high' end of
    the malloc pool. You can call this after freeing large blocks of
    memory to potentially reduce the system-level memory requirements
    of a program. However, it cannot guarantee to reduce memory. Under
    some allocation patterns, some large free blocks of memory will be
    locked between two used chunks, so they cannot be given back to
    the system.

    The `pad' argument to malloc_trim represents the amount of free
    trailing space to leave untrimmed. If this argument is zero,
    only the minimum amount of memory to maintain internal data
    structures will be left (one page or less). Non-zero arguments
    can be supplied to maintain enough trailing space to service
    future expected allocations without having to re-obtain memory
    from the system.

    Malloc_trim returns 1 if it actually released any memory, else 0.

*/
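
/*
  A minimal usage sketch (illustrative only; big_buffer is a hypothetical
  earlier allocation): after releasing a large buffer, ask the allocator
  to return trailing free memory to the system while keeping 4 KiB in
  reserve for upcoming allocations.

      fREe(big_buffer);
      if (malloc_trim(4096))
          ;   // some memory was handed back via a negative sbrk
*/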

#if __STD_C
int malloc_trim(size_t pad)
#else
int malloc_trim(pad) size_t pad;
#endif
{
  long  top_size;        /* Amount of top-most memory */
  long  extra;           /* Amount to release */
  char* current_brk;     /* address returned by pre-check sbrk call */
  char* new_brk;         /* address returned by negative sbrk call */

  unsigned long pagesz = malloc_getpagesize;

  top_size = chunksize(top);
  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;

  if (extra < (long)pagesz)  /* Not enough memory to release */
    return 0;

  else
  {
    /* Test to make sure no one else called sbrk */
    current_brk = (char*)(MORECORE (0));
    if (current_brk != (char*)(top) + top_size)
      return 0;     /* Apparently we don't own memory; must fail */

    else
    {
      new_brk = (char*)(MORECORE (-extra));

      if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
      {
        /* Try to figure out what we have */
        current_brk = (char*)(MORECORE (0));
        top_size = current_brk - (char*)top;
        if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
        {
          sbrked_mem = current_brk - sbrk_base;
          set_head(top, top_size | PREV_INUSE);
        }
        check_chunk(top);
        return 0;
      }

      else
      {
        /* Success. Adjust top accordingly. */
        set_head(top, (top_size - extra) | PREV_INUSE);
        sbrked_mem -= extra;
        check_chunk(top);
        return 1;
      }
    }
  }
}



/*
  malloc_usable_size:

    This routine tells you how many bytes you can actually use in an
    allocated chunk, which may be more than you requested (although
    often not). You can use this many bytes without worrying about
    overwriting other allocated objects. Not a particularly great
    programming practice, but still sometimes useful.

*/
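
/*
  A minimal usage sketch (illustrative only): the usable size is at
  least the requested size, and the surplus may be used safely.

      char *s = mALLOc(10);
      if (s != NULL) {
          size_t usable = malloc_usable_size(s);  // >= 10 for a live chunk
          memset(s, 0, usable);                   // stays within the chunk
      }
*/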

#if __STD_C
size_t malloc_usable_size(Void_t* mem)
#else
size_t malloc_usable_size(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  if (mem == NULL)
    return 0;
  else
  {
    p = mem2chunk(mem);
    if(!chunk_is_mmapped(p))
    {
      if (!inuse(p)) return 0;
      check_inuse_chunk(p);
      return chunksize(p) - SIZE_SZ;
    }
    return chunksize(p) - 2*SIZE_SZ;
  }
}



/* Utility to update current_mallinfo for malloc_stats and mallinfo() */

#ifdef DEBUG
static void malloc_update_mallinfo()
{
  int i;
  mbinptr b;
  mchunkptr p;
#ifdef DEBUG
  mchunkptr q;
#endif

  INTERNAL_SIZE_T avail = chunksize(top);
  int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;

  for (i = 1; i < NAV; ++i)
  {
    b = bin_at(i);
    for (p = last(b); p != b; p = p->bk)
    {
#ifdef DEBUG
      check_free_chunk(p);
      for (q = next_chunk(p);
           q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
           q = next_chunk(q))
        check_inuse_chunk(q);
#endif
      avail += chunksize(p);
      navail++;
    }
  }

  current_mallinfo.ordblks = navail;
  current_mallinfo.uordblks = sbrked_mem - avail;
  current_mallinfo.fordblks = avail;
  current_mallinfo.hblks = n_mmaps;
  current_mallinfo.hblkhd = mmapped_mem;
  current_mallinfo.keepcost = chunksize(top);

}
#endif /* DEBUG */



/*

  malloc_stats:

    Prints the amount of space obtained from the system (both
    via sbrk and mmap), the maximum amount (which may be more than
    current if malloc_trim and/or munmap got called), the maximum
    number of simultaneous mmap regions used, and the current number
    of bytes allocated via malloc (or realloc, etc) but not yet
    freed. (Note that this is the number of bytes allocated, not the
    number requested. It will be larger than the number requested
    because of alignment and bookkeeping overhead.)

*/

#ifdef DEBUG
void malloc_stats()
{
  malloc_update_mallinfo();
  printf("max system bytes = %10u\n",
          (unsigned int)(max_total_mem));
  printf("system bytes     = %10u\n",
          (unsigned int)(sbrked_mem + mmapped_mem));
  printf("in use bytes     = %10u\n",
          (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
#if HAVE_MMAP
  printf("max mmap regions = %10u\n",
          (unsigned int)max_n_mmaps);
#endif
}
#endif /* DEBUG */

/*
  mallinfo returns a copy of updated current mallinfo.
*/

#ifdef DEBUG
struct mallinfo mALLINFo()
{
  malloc_update_mallinfo();
  return current_mallinfo;
}
#endif /* DEBUG */



/*
  mallopt:

    mallopt is the general SVID/XPG interface to tunable parameters.
    The format is to provide a (parameter-number, parameter-value) pair.
    mallopt then sets the corresponding parameter to the argument
    value if it can (i.e., so long as the value is meaningful),
    and returns 1 if successful else 0.

    See descriptions of tunable parameters above.

*/
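
/*
  A minimal usage sketch (illustrative only): raise the trim threshold
  so free() hands memory back to the system less eagerly, and reserve
  extra headroom whenever sbrk must be called.

      mALLOPt(M_TRIM_THRESHOLD, 256 * 1024);  // trim only past 256 KiB
      mALLOPt(M_TOP_PAD, 16 * 1024);          // over-allocate sbrk by 16 KiB
*/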

#if __STD_C
int mALLOPt(int param_number, int value)
#else
int mALLOPt(param_number, value) int param_number; int value;
#endif
{
  switch(param_number)
  {
    case M_TRIM_THRESHOLD:
      trim_threshold = value; return 1;
    case M_TOP_PAD:
      top_pad = value; return 1;
    case M_MMAP_THRESHOLD:
      mmap_threshold = value; return 1;
    case M_MMAP_MAX:
#if HAVE_MMAP
      n_mmaps_max = value; return 1;
#else
      if (value != 0) return 0; else n_mmaps_max = value; return 1;
#endif

    default:
      return 0;
  }
}

int initf_malloc(void)
{
#if CONFIG_VAL(SYS_MALLOC_F_LEN)
    assert(gd->malloc_base);    /* Set up by crt0.S */
    gd->malloc_limit = CONFIG_VAL(SYS_MALLOC_F_LEN);
    gd->malloc_ptr = 0;
#endif

    return 0;
}

/*

History:

    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
      * return null for negative arguments
      * Added Several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com>
        * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
          (e.g. WIN32 platforms)
        * Cleanup up header file inclusion for WIN32 platforms
        * Cleanup code to avoid Microsoft Visual C++ compiler complaints
        * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
          memory allocation routines
        * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
        * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
          usage of 'assert' in non-WIN32 code
        * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
          avoid infinite loop
      * Always call 'fREe()' rather than 'free()'

    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
      * Fixed ordering problem with boundary-stamping

    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
      * Added pvalloc, as recommended by H.J. Liu
      * Added 64bit pointer support mainly from Wolfram Gloger
      * Added anonymously donated WIN32 sbrk emulation
      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
      * malloc_extend_top: fix mask error that caused wastage after
        foreign sbrks
      * Add linux mremap support code from HJ Liu

    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
      * Integrated most documentation with the code.
      * Add support for mmap, with help from
        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Use last_remainder in more cases.
      * Pack bins using idea from colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
      * Eliminate block-local decls to simplify tracing and debugging.
      * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
      * Rely on page size for units instead of SBRK_UNIT to
        avoid surprises about sbrk alignment conventions.
      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
        (raymond@es.ele.tue.nl) for the suggestion.
      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
      * More precautions for cases where other routines call sbrk,
        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Added macros etc., allowing use in linux libc from
        H.J. Lu (hjl@gnu.ai.mit.edu)
      * Inverted this history list

    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
      * Removed all preallocation code since under current scheme
        the work required to undo bad preallocations exceeds
        the work saved in good cases for most test programs.
      * No longer use return list or unconsolidated bins since
        no scheme using them consistently outperforms those that don't
        given above changes.
      * Use best fit for very large chunks to prevent some worst-cases.
      * Added some support for debugging

    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
      * Removed footers when chunks are in use. Thanks to
        Paul Wilson (wilson@cs.texas.edu) for the suggestion.

    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
      * Added malloc_trim, with help from Wolfram Gloger
        (wmglo@Dent.MED.Uni-Muenchen.DE).

    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)

    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
      * realloc: try to expand in both directions
      * malloc: swap order of clean-bin strategy;
      * realloc: only conditionally expand backwards
      * Try not to scavenge used bins
      * Use bin counts as a guide to preallocation
      * Occasionally bin return list chunks in first scan
      * Add a few optimizations from colin@nyx10.cs.du.edu

    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
      * faster bin computation & slightly different binning
      * merged all consolidations to one part of malloc proper
        (eliminating old malloc_find_space & malloc_clean_bin)
      * Scan 2 returns chunks (not just 1)
      * Propagate failure in realloc if malloc returns 0
      * Add stuff to allow compilation on non-ANSI compilers
        from kpv@research.att.com

    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
      * removed potential for odd address access in prev_chunk
      * removed dependency on getpagesize.h
      * misc cosmetics and a bit more internal documentation
      * anticosmetics: mangled names in macros to evade debugger strangeness
      * tested on sparc, hp-700, dec-mips, rs6000
        with gcc & native cc (hp, dec only) allowing
        Detlefs & Zorn comparison study (in SIGPLAN Notices.)

    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
        structure of old version, but most details differ.)

*/