/*
  A version of malloc/free/realloc written by Doug Lea and released to the
  public domain. Send questions/comments/complaints/performance data
  to dl@cs.oswego.edu

* VERSION 2.6.6 Sun Mar 5 19:10:03 2000 Doug Lea (dl at gee)

  Note: There may be an updated version of this malloc obtainable at
	   ftp://g.oswego.edu/pub/misc/malloc.c
	 Check before installing!

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator. For a high-level description, see
     http://g.oswego.edu/dl/html/malloc.html

* Synopsis of public routines

  (Much fuller descriptions are contained in the program documentation below.)

  malloc(size_t n);
     Return a pointer to a newly allocated chunk of at least n bytes, or null
     if no space is available.
  free(Void_t* p);
     Release the chunk of memory pointed to by p, or no effect if p is null.
  realloc(Void_t* p, size_t n);
     Return a pointer to a chunk of size n that contains the same data
     as does chunk p up to the minimum of (n, p's size) bytes, or null
     if no space is available. The returned pointer may or may not be
     the same as p. If p is null, equivalent to malloc. Unless the
     #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
     size argument of zero (re)allocates a minimum-sized chunk.
  memalign(size_t alignment, size_t n);
     Return a pointer to a newly allocated chunk of n bytes, aligned
     in accord with the alignment argument, which must be a power of
     two.
  valloc(size_t n);
     Equivalent to memalign(pagesize, n), where pagesize is the page
     size of the system (or as near to this as can be figured out from
     all the includes/defines below.)
  pvalloc(size_t n);
     Equivalent to valloc(minimum-page-that-holds(n)), that is,
     round up n to nearest pagesize.
  calloc(size_t unit, size_t quantity);
     Returns a pointer to quantity * unit bytes, with all locations
     set to zero.
  cfree(Void_t* p);
     Equivalent to free(p).
  malloc_trim(size_t pad);
     Release all but pad bytes of freed top-most memory back
     to the system. Return 1 if successful, else 0.
  malloc_usable_size(Void_t* p);
     Report the number of usable allocated bytes associated with allocated
     chunk p. This may or may not report more bytes than were requested,
     due to alignment and minimum size constraints.
  malloc_stats();
     Prints brief summary statistics on stderr.
  mallinfo()
     Returns (by copy) a struct containing various summary statistics.
  mallopt(int parameter_number, int parameter_value)
     Changes one of the tunable parameters described below. Returns
     1 if successful in changing the parameter, else 0.
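
  As a brief usage sketch (an illustration added to this summary, with
  error handling reduced to a null check):

     char *buf = malloc(128);
     if (buf != NULL) {
	 char *bigger = realloc(buf, 256);
	 if (bigger != NULL)
	     buf = bigger;
	 free(buf);
     }

  Note that realloc may move the block, so the old pointer must not be
  used again after a successful realloc.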

* Vital statistics:

  Alignment:                            8-byte
       8 byte alignment is currently hardwired into the design. This
       seems to suffice for all current machines and C compilers.

  Assumed pointer representation:       4 or 8 bytes
       Code for 8-byte pointers is untested by me but has worked
       reliably by Wolfram Gloger, who contributed most of the
       changes supporting this.

  Assumed size_t representation:        4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.

  Minimum overhead per allocated chunk: 4 or 8 bytes
       Each malloced chunk has a hidden overhead of 4 bytes holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
       ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field
       and 8 (16) bytes for free list pointers. Thus, the minimum
       allocatable size is 16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

  Maximum allocated size: 4-byte size_t: 2^31 - 8 bytes
                          8-byte size_t: 2^63 - 16 bytes

       It is assumed that (possibly signed) size_t bit values suffice to
       represent chunk sizes. `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type. To be conservative, values that would appear
       as negative numbers are avoided.
       Requests for sizes with a negative sign bit (when the request
       size is treated as a long) will return null.

  Maximum overhead wastage per allocated chunk: normally 15 bytes

       Alignment demands, plus the minimum allocatable size restriction
       make the normal worst-case wastage 15 bytes (i.e., up to 15
       more bytes will be allocated than were requested in malloc), with
       two exceptions:
	 1. Because requests for zero bytes allocate non-zero space,
	    the worst case wastage for a request of zero bytes is 24 bytes.
	 2. For requests >= mmap_threshold that are serviced via
	    mmap(), the worst case wastage is 8 bytes plus the remainder
	    from a system page (the minimal mmap unit); typically 4096 bytes.
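
  As a worked example (added for illustration; figures assume 4-byte
  pointers and 4-byte size_t): malloc(10) needs 10 bytes plus the 4-byte
  overhead, which rounds up to the 16-byte minimum chunk, leaving 12 bytes
  of usable payload, 2 bytes more than requested. malloc(0) is given the
  same minimum-sized chunk.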

* Limitations

    Here are some features that are NOT currently supported

    * No user-definable hooks for callbacks and the like.
    * No automated mechanism for fully checking that all accesses
      to malloced memory stay within their bounds.
    * No support for compaction.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below. It has been tested most extensively on Solaris and
    Linux. It is also reported to work on WIN32 platforms.
    People have also reported adapting this malloc for use in
    stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C. Among other
    consequences, it uses a lot of macros. Because of this, to be at
    all usable, this code should be compiled using an optimizing compiler
    (for example gcc -O2) that can simplify expressions and control
    paths.

  __STD_C                   (default: derived from C compiler defines)
     Nonzero if using ANSI-standard C compiler, a C++ compiler, or
     a C compiler sufficiently close to ANSI to get away with it.
  DEBUG                     (default: NOT defined)
     Define to enable debugging. Adds fairly extensive assertion-based
     checking to help track down memory errors, but noticeably slows down
     execution.
  REALLOC_ZERO_BYTES_FREES  (default: NOT defined)
     Define this if you think that realloc(p, 0) should be equivalent
     to free(p). Otherwise, since malloc returns a unique pointer for
     malloc(0), so does realloc(p, 0).
  HAVE_MEMCPY               (default: defined)
     Define if you are not otherwise using ANSI STD C, but still
     have memcpy and memset in your C library and want to use them.
     Otherwise, simple internal versions are supplied.
  USE_MEMCPY                (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
     Define as 1 if you want the C library versions of memset and
     memcpy called in realloc and calloc (otherwise macro versions are used).
     At least on some platforms, the simple macro versions usually
     outperform libc versions.
  HAVE_MMAP                 (default: defined as 1)
     Define to non-zero to optionally make malloc() use mmap() to
     allocate very large blocks.
  HAVE_MREMAP               (default: defined as 0 unless Linux libc set)
     Define to non-zero to optionally make realloc() use mremap() to
     reallocate very large blocks.
  malloc_getpagesize        (default: derived from system #includes)
     Either a constant or routine call returning the system page size.
  HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
     Optionally define if you are on a system with a /usr/include/malloc.h
     that declares struct mallinfo. It is not at all necessary to
     define this even if you do, but will ensure consistency.
  INTERNAL_SIZE_T           (default: size_t)
     Define to a 32-bit type (probably `unsigned int') if you are on a
     64-bit machine, yet do not want or need to allow malloc requests of
     greater than 2^31 to be handled. This saves space, especially for
     very small chunks.
  INTERNAL_LINUX_C_LIB      (default: NOT defined)
     Defined only when compiled as part of Linux libc.
     Also note that there is some odd internal name-mangling via defines
     (for example, internally, `malloc' is named `mALLOc') needed
     when compiling in this case. These look funny but don't otherwise
     affect anything.
  WIN32                     (default: undefined)
     Define this on MS win (95, nt) platforms to compile in sbrk emulation.
  LACKS_UNISTD_H            (default: undefined if not WIN32)
     Define this if your system does not have a <unistd.h>.
  LACKS_SYS_PARAM_H         (default: undefined if not WIN32)
     Define this if your system does not have a <sys/param.h>.
  MORECORE                  (default: sbrk)
     The name of the routine to call to obtain more memory from the system.
  MORECORE_FAILURE          (default: -1)
     The value returned upon failure of MORECORE.
  MORECORE_CLEARS           (default 1)
     true (1) if the routine mapped to MORECORE zeroes out memory (which
     holds for sbrk).
  DEFAULT_TRIM_THRESHOLD
  DEFAULT_TOP_PAD
  DEFAULT_MMAP_THRESHOLD
  DEFAULT_MMAP_MAX
     Default values of tunable parameters (described in detail below)
     controlling interaction with host system routines (sbrk, mmap, etc).
     These values may also be changed dynamically via mallopt(). The
     preset defaults are those that give best performance for typical
     programs/systems.
  USE_DL_PREFIX             (default: undefined)
     Prefix all public routines with the string 'dl'. Useful to
     quickly avoid procedure declaration conflicts and linker symbol
     conflicts with existing memory allocation routines.
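
  As an illustration (an example added here, not from the original text),
  these options are normally selected on the compiler command line, e.g.

     gcc -O2 -DUSE_DL_PREFIX -DDEBUG -c dlmalloc.c

  or by #defining them before this header is processed.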


*/


#ifndef __MALLOC_H__
#define __MALLOC_H__

/* Preliminaries */

#ifndef __STD_C
#ifdef __STDC__
#define __STD_C 1
#else
#if __cplusplus
#define __STD_C 1
#else
#define __STD_C 0
#endif /*__cplusplus*/
#endif /*__STDC__*/
#endif /*__STD_C*/

#ifndef Void_t
#if (__STD_C || defined(WIN32))
#define Void_t void
#else
#define Void_t char
#endif
#endif /*Void_t*/

#if __STD_C
#include <linux/stddef.h>	/* for size_t */
#else
#include <sys/types.h>
#endif /* __STD_C */

#ifdef __cplusplus
extern "C" {
#endif

#if 0	/* not for U-Boot */
#include <stdio.h>	/* needed for malloc_stats */
#endif


/*
  Compile-time options
*/


/*
    Debugging:

    Because freed chunks may be overwritten with link fields, this
    malloc will often die when freed memory is overwritten by user
    programs. This can be very effective (albeit in an annoying way)
    in helping track down dangling pointers.

    If you compile with -DDEBUG, a number of assertion checks are
    enabled that will catch more memory errors. You probably won't be
    able to make much sense of the actual assertion errors, but they
    should help you locate incorrectly overwritten memory. The
    checking is fairly extensive, and will slow down execution
    noticeably. Calling malloc_stats or mallinfo with DEBUG set will
    attempt to check every non-mmapped allocated and free chunk in the
    course of computing the summaries. (By nature, mmapped regions
    cannot be checked very much automatically.)

    Setting DEBUG may also be helpful if you are trying to modify
    this code. The assertions in the check routines spell out in more
    detail the assumptions and invariants underlying the algorithms.

*/

/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes. On a 64-bit machine, you can reduce malloc
  overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
  at the expense of not being able to handle requests greater than
  2^31. This limitation is hardly ever a concern; you are encouraged
  to set this. However, the default version is the same as size_t.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
#endif
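
/*
 * Illustrative override (not enabled here): on a 64-bit build that never
 * needs requests above 2^31, one might compile with
 *
 *     -DINTERNAL_SIZE_T='unsigned int'
 *
 * or place an equivalent #define ahead of this header to shrink the
 * per-chunk bookkeeping fields.
 */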

/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  Some people think it should. Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
*/


/* #define REALLOC_ZERO_BYTES_FREES */


/*
  WIN32 causes an emulation of sbrk to be compiled in.
  mmap-based options are not currently supported in WIN32.
*/

/* #define WIN32 */
#ifdef WIN32
#define MORECORE wsbrk
#define HAVE_MMAP 0

#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H

/*
  Include 'windows.h' to get the necessary declarations for the
  Microsoft Visual C++ data structures and routines used in the 'sbrk'
  emulation.

  Define WIN32_LEAN_AND_MEAN so that only the essential Microsoft
  Visual C++ header files are included.
*/
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif


/*
  HAVE_MEMCPY should be defined if you are not otherwise using
  ANSI STD C, but still have memcpy and memset in your C library
  and want to use them in calloc and realloc. Otherwise simple
  macro versions are defined here.

  USE_MEMCPY should be defined as 1 if you actually want to
  have memset and memcpy called. People report that the macro
  versions are often enough faster than libc versions on many
  systems that it is better to use them.

*/

#define HAVE_MEMCPY

#ifndef USE_MEMCPY
#ifdef HAVE_MEMCPY
#define USE_MEMCPY 1
#else
#define USE_MEMCPY 0
#endif
#endif

#if (__STD_C || defined(HAVE_MEMCPY))

#if __STD_C
/* U-Boot defines memset() and memcpy() in /include/linux/string.h
void* memset(void*, int, size_t);
void* memcpy(void*, const void*, size_t);
*/
#include <linux/string.h>
#else
#ifdef WIN32
/* On Win32 platforms, 'memset()' and 'memcpy()' are already declared in */
/* 'windows.h' */
#else
Void_t* memset();
Void_t* memcpy();
#endif
#endif
#endif

#if USE_MEMCPY

/* The following macros are only invoked with (2n+1)-multiples of
   INTERNAL_SIZE_T units, with a positive integer n. This is exploited
   for fast inline execution when n is small. */

#define MALLOC_ZERO(charp, nbytes) \
do { \
  INTERNAL_SIZE_T mzsz = (nbytes); \
  if(mzsz <= 9*sizeof(mzsz)) { \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp); \
    if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0; \
                                     *mz++ = 0; \
      if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0; \
                                     *mz++ = 0; \
        if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0; \
                                     *mz++ = 0; }}} \
                                     *mz++ = 0; \
                                     *mz++ = 0; \
                                     *mz   = 0; \
  } else memset((charp), 0, mzsz); \
} while(0)

#define MALLOC_COPY(dest,src,nbytes) \
do { \
  INTERNAL_SIZE_T mcsz = (nbytes); \
  if(mcsz <= 9*sizeof(mcsz)) { \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src); \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest); \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++; \
                                     *mcdst++ = *mcsrc++; \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++; \
                                     *mcdst++ = *mcsrc++; \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \
                                     *mcdst++ = *mcsrc++; }}} \
                                     *mcdst++ = *mcsrc++; \
                                     *mcdst++ = *mcsrc++; \
                                     *mcdst   = *mcsrc  ; \
  } else memcpy(dest, src, mcsz); \
} while(0)
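
/*
 * Illustrative use (these macros are internal to this allocator; the size
 * argument is always an odd multiple of sizeof(INTERNAL_SIZE_T)):
 *
 *     MALLOC_ZERO(mem, 7 * sizeof(INTERNAL_SIZE_T));   fully unrolled inline
 *     MALLOC_ZERO(mem, 13 * sizeof(INTERNAL_SIZE_T));  falls back to memset
 */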

#else /* !USE_MEMCPY */

/* Use Duff's device for good zeroing/copying performance. */

#define MALLOC_ZERO(charp, nbytes) \
do { \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
  switch (mctmp) { \
    case 0: for(;;) { *mzp++ = 0; \
    case 7:           *mzp++ = 0; \
    case 6:           *mzp++ = 0; \
    case 5:           *mzp++ = 0; \
    case 4:           *mzp++ = 0; \
    case 3:           *mzp++ = 0; \
    case 2:           *mzp++ = 0; \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
  } \
} while(0)

#define MALLOC_COPY(dest,src,nbytes) \
do { \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
  switch (mctmp) { \
    case 0: for(;;) { *mcdst++ = *mcsrc++; \
    case 7:           *mcdst++ = *mcsrc++; \
    case 6:           *mcdst++ = *mcsrc++; \
    case 5:           *mcdst++ = *mcsrc++; \
    case 4:           *mcdst++ = *mcsrc++; \
    case 3:           *mcdst++ = *mcsrc++; \
    case 2:           *mcdst++ = *mcsrc++; \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
  } \
} while(0)

#endif


/*
  Define HAVE_MMAP to optionally make malloc() use mmap() to
  allocate very large blocks. These will be returned to the
  operating system immediately after a free().
*/

/***
#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif
***/
#undef HAVE_MMAP	/* Not available for U-Boot */

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks. This is currently only possible on Linux with
  kernel versions newer than 1.3.77.
*/

/***
#ifndef HAVE_MREMAP
#ifdef INTERNAL_LINUX_C_LIB
#define HAVE_MREMAP 1
#else
#define HAVE_MREMAP 0
#endif
#endif
***/
#undef HAVE_MREMAP	/* Not available for U-Boot */

#ifdef HAVE_MMAP

#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#endif /* HAVE_MMAP */

/*
  Access to system page size. To the extent possible, this malloc
  manages memory from the system in page-size units.

  The following mechanics for getpagesize were adapted from
  bsd/gnu getpagesize.h
*/

#define LACKS_UNISTD_H	/* Shortcut for U-Boot */
#define malloc_getpagesize 4096

#ifndef LACKS_UNISTD_H
#  include <unistd.h>
#endif

#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE	/* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      ifdef WIN32
#        define malloc_getpagesize (4096) /* TBD: Use 'GetSystemInfo' instead */
#      else
#        ifndef LACKS_SYS_PARAM_H
#          include <sys/param.h>
#        endif
#        ifdef EXEC_PAGESIZE
#          define malloc_getpagesize EXEC_PAGESIZE
#        else
#          ifdef NBPG
#            ifndef CLSIZE
#              define malloc_getpagesize NBPG
#            else
#              define malloc_getpagesize (NBPG * CLSIZE)
#            endif
#          else
#            ifdef NBPC
#              define malloc_getpagesize NBPC
#            else
#              ifdef PAGESIZE
#                define malloc_getpagesize PAGESIZE
#              else
#                define malloc_getpagesize (4096) /* just guess */
#              endif
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif


/*

  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing the same kind of
  information you can get from malloc_stats. It should work on
  any SVID/XPG compliant system that has a /usr/include/malloc.h
  defining struct mallinfo. (If you'd like to install such a thing
  yourself, cut out the preliminary declarations as described above
  and below and save them in a malloc.h file. But there's no
  compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
  bunch of fields, most of which are not even meaningful in this
  version of malloc. Some of these fields are instead filled by
  mallinfo() with other numbers that might possibly be of interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo. If so, it is included; else an SVID2/XPG2 compliant
  version is declared below. These must be precisely the same for
  mallinfo() to work.

*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else

/* SVID2/XPG mallinfo structure */

struct mallinfo {
  int arena;    /* total space allocated from system */
  int ordblks;  /* number of non-inuse chunks */
  int smblks;   /* unused -- always zero */
  int hblks;    /* number of mmapped regions */
  int hblkhd;   /* total space in mmapped regions */
  int usmblks;  /* unused -- always zero */
  int fsmblks;  /* unused -- always zero */
  int uordblks; /* total allocated space */
  int fordblks; /* total non-inuse space */
  int keepcost; /* top-most, releasable (via malloc_trim) space */
};
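
/*
 * Illustrative use of mallinfo() (a sketch added here, not code from
 * this file):
 *
 *     struct mallinfo mi = mallinfo();
 *     printf("arena %d bytes, in use %d, free %d\n",
 *            mi.arena, mi.uordblks, mi.fordblks);
 */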

/* SVID2/XPG mallopt options */

#define M_MXFAST  1	/* UNUSED in this malloc */
#define M_NLBLKS  2	/* UNUSED in this malloc */
#define M_GRAIN   3	/* UNUSED in this malloc */
#define M_KEEP    4	/* UNUSED in this malloc */

#endif

/* mallopt options that actually do something */

#define M_TRIM_THRESHOLD    -1
#define M_TOP_PAD           -2
#define M_MMAP_THRESHOLD    -3
#define M_MMAP_MAX          -4


#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif

/*
  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  to keep before releasing via malloc_trim in free().

  Automatic trimming is mainly useful in long-lived programs.
  Because trimming via sbrk can be slow on some systems, and can
  sometimes be wasteful (in cases where programs immediately
  afterward allocate more large chunks) the value should be high
  enough so that your overall system performance would improve by
  releasing.

  The trim threshold and the mmap control parameters (see below)
  can be traded off with one another. Trimming and mmapping are
  two different ways of releasing unused memory back to the
  system. Between these two, it is often possible to keep
  system-level demands of a long-lived program down to a bare
  minimum. For example, in one test suite of sessions measuring
  the XF86 X server on Linux, using a trim threshold of 128K and a
  mmap threshold of 192K led to near-minimal long term resource
  consumption.

  If you are using this malloc in a long-lived program, it should
  pay to experiment with these values. As a rough guide, you
  might set to a value close to the average size of a process
  (program) running on your system. Releasing this much memory
  would allow such a process to run in memory. Generally, it's
  worth it to tune for trimming rather than memory mapping when a
  program undergoes phases where several large chunks are
  allocated and released in ways that can reuse each other's
  storage, perhaps mixed with phases where there are no such
  chunks at all. And in well-behaved long-lived programs,
  controlling release of large blocks via trimming versus mapping
  is usually faster.

  However, in most programs, these parameters serve mainly as
  protection against the system-level effects of carrying around
  massive amounts of unneeded memory. Since frequent calls to
  sbrk, mmap, and munmap otherwise degrade performance, the default
  parameters are set to relatively high values that serve only as
  safeguards.

  The default trim value is high enough to cause trimming only in
  fairly extreme (by current memory consumption standards) cases.
  It must be greater than page size to have any useful effect. To
  disable trimming completely, you can set to (unsigned long)(-1);


*/


#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD (0)
#endif

/*
  M_TOP_PAD is the amount of extra `padding' space to allocate or
  retain whenever sbrk is called. It is used in two ways internally:

    * When sbrk is called to extend the top of the arena to satisfy
      a new malloc request, this much padding is added to the sbrk
      request.

    * When malloc_trim is called automatically from free(),
      it is used as the `pad' argument.

  In both cases, the actual amount of padding is rounded
  so that the end of the arena is always a system page boundary.

  The main reason for using padding is to avoid calling sbrk so
  often. Having even a small pad greatly reduces the likelihood
  that nearly every malloc request during program start-up (or
  after trimming) will invoke sbrk, which needlessly wastes
  time.

  Automatic rounding-up to page-size units is normally sufficient
  to avoid measurable overhead, so the default is 0. However, in
  systems where sbrk is relatively slow, it can pay to increase
  this value, at the expense of carrying around more memory than
  the program needs.

*/


#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
#endif

/*

  M_MMAP_THRESHOLD is the request size threshold for using mmap()
  to service a request. Requests of at least this size that cannot
  be allocated using already-existing space will be serviced via mmap.
  (If enough normal freed space already exists it is used instead.)

  Using mmap segregates relatively large chunks of memory so that
  they can be individually obtained and released from the host
  system. A request serviced through mmap is never reused by any
  other request (at least not directly; the system may just so
  happen to remap successive requests to the same locations).

  Segregating space in this way has the benefit that mmapped space
  can ALWAYS be individually released back to the system, which
  helps keep the system level memory demands of a long-lived
  program low. Mapped memory can never become `locked' between
  other chunks, as can happen with normally allocated chunks, which
  means that even trimming via malloc_trim would not release them.

  However, it has the disadvantages that:

      1. The space cannot be reclaimed, consolidated, and then
         used to service later requests, as happens with normal chunks.
      2. It can lead to more wastage because of mmap page alignment
         requirements
      3. It causes malloc performance to be more dependent on host
         system memory management support routines which may vary in
         implementation quality and may impose arbitrary
         limitations. Generally, servicing a request via normal
         malloc steps is faster than going through a system's mmap.

  All together, these considerations should lead you to use mmap
  only for relatively large requests.


*/


#ifndef DEFAULT_MMAP_MAX
#ifdef HAVE_MMAP
#define DEFAULT_MMAP_MAX (64)
#else
#define DEFAULT_MMAP_MAX (0)
#endif
#endif

/*
  M_MMAP_MAX is the maximum number of requests to simultaneously
  service using mmap. This parameter exists because:

      1. Some systems have a limited number of internal tables for
         use by mmap.
      2. In most systems, overreliance on mmap can degrade overall
         performance.
      3. If a program allocates many large regions, it is probably
         better off using normal sbrk-based allocation routines that
         can reclaim and reallocate normal heap memory. Using a
         small value allows transition into this mode after the
         first few allocations.

  Setting to 0 disables all use of mmap. If HAVE_MMAP is not set,
  the default value is 0, and attempts to set it to non-zero values
  in mallopt will fail.
*/
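
/*
 * Illustrative tuning calls (a sketch only; in U-Boot the defaults above
 * are normally left alone, and the mmap-related options are no-ops since
 * HAVE_MMAP is not set):
 *
 *     mallopt(M_TRIM_THRESHOLD, 256 * 1024);
 *     mallopt(M_TOP_PAD, 16 * 1024);
 */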


/*
  USE_DL_PREFIX will prefix all public routines with the string 'dl'.
  Useful to quickly avoid procedure declaration conflicts and linker
  symbol conflicts with existing memory allocation routines.

*/

/*
 * Rename the U-Boot alloc functions so that sandbox can still use the system
 * ones
 */
#ifdef CONFIG_SANDBOX
#define USE_DL_PREFIX
#endif

/*

  Special defines for linux libc

  Except when compiled using these special defines for Linux libc
  using weak aliases, this malloc is NOT designed to work in
  multithreaded applications. No semaphores or other concurrency
  control are provided to ensure that multiple malloc or free calls
  don't run at the same time, which could be disastrous. A single
  semaphore could be used across malloc, realloc, and free (which is
  essentially the effect of the linux weak alias approach). It would
  be hard to obtain finer granularity.

*/


#ifdef INTERNAL_LINUX_C_LIB

#if __STD_C

Void_t * __default_morecore_init (ptrdiff_t);
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init;

#else

Void_t * __default_morecore_init ();
Void_t *(*__morecore)() = __default_morecore_init;

#endif

#define MORECORE (*__morecore)
#define MORECORE_FAILURE 0
#define MORECORE_CLEARS 1

#else /* INTERNAL_LINUX_C_LIB */

#if __STD_C
extern Void_t* sbrk(ptrdiff_t);
#else
extern Void_t* sbrk();
#endif

#ifndef MORECORE
#define MORECORE sbrk
#endif

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE -1
#endif

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif

#endif /* INTERNAL_LINUX_C_LIB */

#if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__)

#define cALLOc    __libc_calloc
#define fREe      __libc_free
#define mALLOc    __libc_malloc
#define mEMALIGn  __libc_memalign
#define rEALLOc   __libc_realloc
#define vALLOc    __libc_valloc
#define pvALLOc   __libc_pvalloc
#define mALLINFo  __libc_mallinfo
#define mALLOPt   __libc_mallopt

#pragma weak calloc = __libc_calloc
#pragma weak free = __libc_free
#pragma weak cfree = __libc_free
#pragma weak malloc = __libc_malloc
#pragma weak memalign = __libc_memalign
#pragma weak realloc = __libc_realloc
#pragma weak valloc = __libc_valloc
#pragma weak pvalloc = __libc_pvalloc
#pragma weak mallinfo = __libc_mallinfo
#pragma weak mallopt = __libc_mallopt

#else

#if CONFIG_IS_ENABLED(SYS_MALLOC_SIMPLE)
#define malloc malloc_simple
#define realloc realloc_simple
#define memalign memalign_simple
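/*
 * With SYS_MALLOC_SIMPLE the allocator only ever grows: free() below is
 * intentionally a no-op, so memory handed out by malloc_simple() is never
 * reclaimed.
 */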
static inline void free(void *ptr) {}
void *calloc(size_t nmemb, size_t size);
void *realloc_simple(void *ptr, size_t size);
void malloc_simple_info(void);
#else

# ifdef USE_DL_PREFIX
#  define cALLOc    dlcalloc
#  define fREe      dlfree
#  define mALLOc    dlmalloc
#  define mEMALIGn  dlmemalign
#  define rEALLOc   dlrealloc
#  define vALLOc    dlvalloc
#  define pvALLOc   dlpvalloc
#  define mALLINFo  dlmallinfo
#  define mALLOPt   dlmallopt

/* Ensure that U-Boot actually uses these too */
#define calloc dlcalloc
#define free(ptr) dlfree(ptr)
#define malloc(x) dlmalloc(x)
#define memalign dlmemalign
#define realloc dlrealloc
#define valloc dlvalloc
#define pvalloc dlpvalloc
#define mallinfo() dlmallinfo()
#define mallopt dlmallopt
#define malloc_trim dlmalloc_trim
#define malloc_usable_size dlmalloc_usable_size
#define malloc_stats dlmalloc_stats

# else /* USE_DL_PREFIX */
#  define cALLOc    calloc
#  define fREe      free
#  define mALLOc    malloc
#  define mEMALIGn  memalign
#  define rEALLOc   realloc
#  define vALLOc    valloc
#  define pvALLOc   pvalloc
#  define mALLINFo  mallinfo
#  define mALLOPt   mallopt
# endif /* USE_DL_PREFIX */

#endif

/* Set up pre-relocation malloc() ready for use */
int initf_malloc(void);

/* Public routines */

/* Simple versions which can be used when space is tight */
void *malloc_simple(size_t size);
void *memalign_simple(size_t alignment, size_t bytes);

#pragma GCC visibility push(hidden)
# if __STD_C

Void_t* mALLOc(size_t);
void    fREe(Void_t*);
Void_t* rEALLOc(Void_t*, size_t);
Void_t* mEMALIGn(size_t, size_t);
Void_t* vALLOc(size_t);
Void_t* pvALLOc(size_t);
Void_t* cALLOc(size_t, size_t);
void    cfree(Void_t*);
int     malloc_trim(size_t);
size_t  malloc_usable_size(Void_t*);
void    malloc_stats(void);
int     mALLOPt(int, int);
struct mallinfo mALLINFo(void);
# else
Void_t* mALLOc();
void    fREe();
Void_t* rEALLOc();
Void_t* mEMALIGn();
Void_t* vALLOc();
Void_t* pvALLOc();
Void_t* cALLOc();
void    cfree();
int     malloc_trim();
size_t  malloc_usable_size();
void    malloc_stats();
int     mALLOPt();
struct mallinfo mALLINFo();
# endif
#endif
#pragma GCC visibility pop

/*
 * Begin and End of memory area for malloc(), and current "brk"
 */
extern ulong mem_malloc_start;
extern ulong mem_malloc_end;
extern ulong mem_malloc_brk;

void mem_malloc_init(ulong start, ulong size);
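
/*
 * Illustrative initialization sequence (a sketch only; the start address
 * and size below are hypothetical, and board/arch code normally does this):
 *
 *     mem_malloc_init(0x01000000, 16 << 20);
 *     void *p = malloc(4096);
 *     ...
 *     free(p);
 */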

#ifdef __cplusplus
};  /* end of extern "C" */
#endif

#endif /* __MALLOC_H__ */