/*
 * include/common/memory.h
 * Memory management definitions.
 *
 * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _COMMON_MEMORY_H
#define _COMMON_MEMORY_H

#include <sys/mman.h>

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <common/config.h>
#include <common/mini-clist.h>
#include <common/hathreads.h>

#ifndef DEBUG_DONT_SHARE_POOLS
#define MEM_F_SHARED	0x1
#else
#define MEM_F_SHARED	0
#endif
#define MEM_F_EXACT	0x2

/* reserve an extra void* at the end of a pool for linking */
#ifdef DEBUG_MEMORY_POOLS
#define POOL_EXTRA (sizeof(void *))
#define POOL_LINK(pool, item) (void **)(((char *)item) + (pool->size))
#else
#define POOL_EXTRA (0)
#define POOL_LINK(pool, item) ((void **)(item))
#endif

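/* Illustrative layout note (not from the original source): with
 * DEBUG_MEMORY_POOLS, each chunk is followed by one extra pointer which
 * either chains free entries or records the owning pool:
 *
 *   [ user data: pool->size bytes ][ void *link ]   <-- POOL_LINK() points here
 *
 * Without it, the link pointer simply overlays the first bytes of a free
 * chunk, which is why every pool entry must be at least pointer-sized.
 */
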
struct pool_head {
	void **free_list;
	__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
	struct list list;	/* list of all known pools */
	unsigned int used;	/* how many chunks are currently in use */
	unsigned int allocated;	/* how many chunks have been allocated */
	unsigned int limit;	/* hard limit on the number of chunks */
	unsigned int minavail;	/* how many chunks are expected to be used */
	unsigned int size;	/* chunk size */
	unsigned int flags;	/* MEM_F_* */
	unsigned int users;	/* number of pools sharing this zone */
	unsigned int failed;	/* failed allocations */
	char name[12];		/* name of the pool */
};

/* poison each newly allocated area with this byte if >= 0 */
extern int mem_poison_byte;

/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * fails, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail);
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail);

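/* Example (illustrative, not from the original source; pool_task is a
 * hypothetical pool): take one entry for immediate use while making sure
 * at least 4 spare entries remain available in the pool afterwards:
 *
 *   void *item = pool_refill_alloc(pool_task, 4);
 */
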
/* Tries to find an existing shared pool with the same characteristics and
 * returns it, otherwise creates a new one. NULL is returned if no memory
 * is available for a new creation.
 */
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags);

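/* Example (illustrative, not from the original source; struct task is a
 * hypothetical type): create or reuse a shared pool of task-sized chunks:
 *
 *   struct pool_head *pool_task;
 *
 *   pool_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
 *   if (!pool_task)
 *           exit(1);
 */
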
/* Dump statistics on pool usage.
 */
void dump_pools_to_trash(void);
void dump_pools(void);
int pool_total_failures(void);
unsigned long pool_total_allocated(void);
unsigned long pool_total_used(void);

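/* Example (illustrative, not from the original source): a caller could
 * report global pool accounting like this:
 *
 *   printf("pools: %lu allocated, %lu used, %d failed allocations\n",
 *          pool_total_allocated(), pool_total_used(), pool_total_failures());
 */
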
/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush2(struct pool_head *pool);

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners.
 *
 * <pool_ctx> is used when pool_gc2() is called from __pool_refill_alloc() to
 * release resources while allocating an element. It matters because
 * <pool_ctx> is already locked by the caller, so its lock must be skipped
 * here.
 */
void pool_gc2(struct pool_head *pool_ctx);

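/* Example (illustrative, not from the original source; assumes that a NULL
 * <pool_ctx> is valid when the caller holds no pool lock): a full garbage
 * collection pass is then simply:
 *
 *   pool_gc2(NULL);
 */
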
/*
 * This function destroys a pool by freeing it completely.
 * This should be called only under extreme circumstances.
 */
void *pool_destroy2(struct pool_head *pool);

/*
 * Returns a pointer to an object taken from pool <pool> if one is
 * available, otherwise returns NULL. No malloc() is attempted, and poisoning
 * is never performed. The purpose is to get the fastest possible allocation.
 */
static inline void *__pool_get_first(struct pool_head *pool)
{
	void *p;

	if ((p = pool->free_list) != NULL) {
		pool->free_list = *POOL_LINK(pool, p);
		pool->used++;
#ifdef DEBUG_MEMORY_POOLS
		/* keep track of where the element was allocated from */
		*POOL_LINK(pool, p) = (void *)pool;
#endif
	}
	return p;
}

static inline void *pool_get_first(struct pool_head *pool)
{
	void *ret;

	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	ret = __pool_get_first(pool);
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	return ret;
}

/*
 * Returns a pointer to an object taken from pool <pool> or dynamically
 * allocated. In the first case, <pool>'s free list is updated to point to
 * the next element in the list. No memory poisoning is ever performed on
 * the returned area.
 */
static inline void *pool_alloc_dirty(struct pool_head *pool)
{
	void *p;

	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	if ((p = __pool_get_first(pool)) == NULL)
		p = __pool_refill_alloc(pool, 0);
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	return p;
}

#ifndef DEBUG_UAF /* normal allocator */

/* allocates an area of size <size> and returns it. The semantics are similar
 * to those of malloc().
 */
static inline void *pool_alloc_area(size_t size)
{
	return malloc(size);
}

/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
 * semantics are identical to free() except that the size is specified and
 * may be ignored.
 */
static inline void pool_free_area(void *area, size_t __maybe_unused size)
{
	free(area);
}

#else /* use-after-free detector */

/* allocates an area of size <size> and returns it. The semantics are similar
 * to those of malloc(). However the allocation is rounded up to 4kB so that a
 * full page is allocated. This ensures the object can be freed alone so that
 * future dereferences are easily detected. The returned object is always
 * 16-byte aligned to avoid issues with unaligned structure objects.
 */
static inline void *pool_alloc_area(size_t size)
{
	size_t pad = (4096 - size) & 0xFF0;

	return mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0) + pad;
}
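
/* Worked example (illustrative, not from the original source): for
 * size = 100,
 *   pad         = (4096 - 100) & 0xFF0 = 3984  (16-byte aligned)
 *   mmap() size = (100 + 4095) & -4096 = 4096  (one full page)
 * so the object occupies bytes 3984..4083 of its own page, ending near the
 * page boundary; unmapping that page on free makes any later dereference
 * fault immediately.
 */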

/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
 * semantics are identical to free() except that the size must absolutely match
 * the one passed to pool_alloc_area().
 */
static inline void pool_free_area(void *area, size_t size)
{
	size_t pad = (4096 - size) & 0xFF0;

	munmap(area - pad, (size + 4095) & -4096);
}

#endif /* DEBUG_UAF */

/*
 * Returns a pointer to an object taken from pool <pool> or dynamically
 * allocated. In the first case, <pool>'s free list is updated to point to
 * the next element in the list. Memory poisoning is performed if enabled.
 */
static inline void *pool_alloc2(struct pool_head *pool)
{
	void *p;

	p = pool_alloc_dirty(pool);
#ifdef DEBUG_MEMORY_POOLS
	if (p) {
		HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
		/* keep track of where the element was allocated from */
		*POOL_LINK(pool, p) = (void *)pool;
		HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	}
#endif
	if (p && mem_poison_byte >= 0) {
		memset(p, mem_poison_byte, pool->size);
	}

	return p;
}
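
/* Example (illustrative, not from the original source; pool_task and
 * struct task are hypothetical): the usual allocate/release cycle is:
 *
 *   struct task *t = pool_alloc2(pool_task);
 *
 *   if (!t)
 *           return 0;
 *   ...
 *   pool_free2(pool_task, t);
 */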

/*
 * Puts a memory area back into the corresponding pool.
 * Items are chained directly through a pointer that
 * is written in the beginning of the memory area, so
 * there's no need for any carrier cell. This implies
 * that each memory area is at least as big as one
 * pointer. Just like with the libc's free(), nothing
 * is done if <ptr> is NULL.
 */
static inline void pool_free2(struct pool_head *pool, void *ptr)
{
	if (likely(ptr != NULL)) {
		HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
#ifdef DEBUG_MEMORY_POOLS
		/* we'll get late corruption if we refill to the wrong pool or double-free */
		if (*POOL_LINK(pool, ptr) != (void *)pool)
			*(int *)0 = 0;
#endif

#ifndef DEBUG_UAF /* normal pool behaviour */
		*POOL_LINK(pool, ptr) = (void *)pool->free_list;
		pool->free_list = (void *)ptr;
#else /* release the entry for real to detect use after free */
		/* ensure we crash on double free or free of a const area */
		*(uint32_t *)ptr = 0xDEADADD4;
		pool_free_area(ptr, pool->size + POOL_EXTRA);
		pool->allocated--;
#endif /* DEBUG_UAF */
		pool->used--;
		HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	}
}
#endif /* _COMMON_MEMORY_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */