/*
 * include/common/memory.h
 * Memory management definitions.
 *
 * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _COMMON_MEMORY_H
#define _COMMON_MEMORY_H

#include <sys/mman.h>

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#include <common/config.h>
#include <common/mini-clist.h>
#include <common/hathreads.h>

#ifndef DEBUG_DONT_SHARE_POOLS
#define MEM_F_SHARED    0x1
#else
#define MEM_F_SHARED    0
#endif
#define MEM_F_EXACT     0x2

/* reserve an extra void* at the end of a pool for linking */
#ifdef DEBUG_MEMORY_POOLS
#define POOL_EXTRA (sizeof(void *))
#define POOL_LINK(pool, item) (void **)(((char *)item) + (pool->size))
#else
#define POOL_EXTRA (0)
#define POOL_LINK(pool, item) ((void **)(item))
#endif

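/* Illustration (not part of the API): free objects are chained through the
 * word designated by POOL_LINK(). Without DEBUG_MEMORY_POOLS this is the
 * first word of the object itself; with it, it is an extra word appended
 * after <pool->size> bytes. A minimal sketch of walking such a chain,
 * assuming the caller already holds whatever lock the pool requires:
 *
 *     void *item;
 *     unsigned int n = 0;
 *
 *     for (item = pool->free_list; item; item = *POOL_LINK(pool, item))
 *             n++;   // n = number of objects currently in the free list
 */
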
#define MAX_BASE_POOLS 32

struct pool_cache_head {
        struct list list;    /* head of objects in this pool */
        size_t size;         /* size of an object */
        unsigned int count;  /* number of objects in this pool */
};

struct pool_cache_item {
        struct list by_pool; /* link to objects in this pool */
        struct list by_lru;  /* link to objects by LRU order */
};

extern THREAD_LOCAL struct pool_cache_head pool_cache[MAX_BASE_POOLS];
extern THREAD_LOCAL struct list pool_lru_head;   /* oldest objects   */
extern THREAD_LOCAL size_t pool_cache_bytes;     /* total cache size */
extern THREAD_LOCAL size_t pool_cache_count;     /* #cache objects   */

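/* Illustration (not part of the API): each thread keeps its most recently
 * released objects in pool_cache[], indexed by pool_get_index(), and also
 * threads them into pool_lru_head so the oldest ones can be evicted first.
 * A rough sketch of the per-thread accounting, assuming the
 * list_for_each_entry() iterator from mini-clist.h:
 *
 *     struct pool_cache_item *it;
 *     size_t n = 0;
 *
 *     list_for_each_entry(it, &pool_lru_head, by_lru)
 *             n++;
 *     // n is expected to equal pool_cache_count for the current thread
 */
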
#ifdef CONFIG_HAP_LOCKLESS_POOLS
struct pool_free_list {
        void **free_list;
        uintptr_t seq;
};
#endif

struct pool_head {
        void **free_list;
#ifdef CONFIG_HAP_LOCKLESS_POOLS
        uintptr_t seq;
#else
        __decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
#endif
        unsigned int used;      /* how many chunks are currently in use */
        unsigned int allocated; /* how many chunks have been allocated */
        unsigned int limit;     /* hard limit on the number of chunks */
        unsigned int minavail;  /* how many chunks are expected to be used */
        unsigned int size;      /* chunk size */
        unsigned int flags;     /* MEM_F_* */
        unsigned int users;     /* number of pools sharing this zone */
        unsigned int failed;    /* failed allocations */
        struct list list;       /* list of all known pools */
        char name[12];          /* name of the pool */
} __attribute__((aligned(64)));


extern struct pool_head pool_base_start[MAX_BASE_POOLS];
extern unsigned int pool_base_count;

/* poison each newly allocated area with this byte if >= 0 */
extern int mem_poison_byte;

/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail);
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail);

/* Tries to find an existing shared pool with the same characteristics and
 * returns it, otherwise creates a new one. NULL is returned if no memory
 * is available for a new creation.
 */
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags);

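/* Typical usage sketch (the pool name and object type below are only
 * examples): declare a pool once at start-up, then allocate and release
 * objects from it.
 *
 *     static struct pool_head *pool_head_task;
 *
 *     void init_example(void)
 *     {
 *             pool_head_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
 *             if (!pool_head_task)
 *                     exit(1);
 *     }
 *
 * Objects are then obtained with pool_alloc(pool_head_task) and released
 * with pool_free(pool_head_task, ptr), both defined below.
 */
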
/* Dump statistics on pools usage.
 */
void dump_pools_to_trash(void);
void dump_pools(void);
int pool_total_failures(void);
unsigned long pool_total_allocated(void);
unsigned long pool_total_used(void);

/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool);

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners.
 *
 * <pool_ctx> is used when pool_gc is called to release resources to allocate
 * an element in __pool_refill_alloc. It is important because <pool_ctx> is
 * already locked, so we need to skip the lock here.
 */
void pool_gc(struct pool_head *pool_ctx);

/*
 * This function destroys a pool by freeing it completely.
 * This should be called only under extreme circumstances.
 */
void *pool_destroy(struct pool_head *pool);

/* returns the pool index for pool <pool>, or -1 if this pool has no index */
static inline ssize_t pool_get_index(const struct pool_head *pool)
{
        size_t idx;

        idx = pool - pool_base_start;
        if (idx >= MAX_BASE_POOLS)
                return -1;
        return idx;
}

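/* Example (illustrative only): pools carved out of the static
 * pool_base_start[] array map to their array offset, while any pool
 * allocated outside of it is not eligible for the per-thread cache:
 *
 *     pool_get_index(&pool_base_start[0])  == 0
 *     pool_get_index(&pool_base_start[31]) == 31
 *     pool_get_index(some_other_pool)      == -1   // not cached locally
 */
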
#ifdef CONFIG_HAP_LOCKLESS_POOLS

/* Tries to retrieve an object from the local pool cache corresponding to pool
 * <pool>. Returns NULL if none is available.
 */
static inline void *__pool_get_from_cache(struct pool_head *pool)
{
        ssize_t idx = pool_get_index(pool);
        struct pool_cache_item *item;

        /* pool not in cache */
        if (idx < 0)
                return NULL;

        /* never allocated or empty */
        if (pool_cache[idx].list.n == NULL || LIST_ISEMPTY(&pool_cache[idx].list))
                return NULL;

        item = LIST_NEXT(&pool_cache[idx].list, typeof(item), by_pool);
        pool_cache[idx].count--;
        pool_cache_bytes -= pool_cache[idx].size;
        pool_cache_count--;
        LIST_DEL(&item->by_pool);
        LIST_DEL(&item->by_lru);
        return item;
}

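/* Note: the struct pool_cache_item above lives inside the cached object
 * itself (the <item> pointer that is returned doubles as the object address),
 * which implies cached objects must be large enough to hold it. Rough picture
 * of a cache hit, assuming an object was previously released with
 * pool_put_to_cache():
 *
 *     void *obj = __pool_get_from_cache(pool);
 *     // obj != NULL: it was unlinked from both the per-pool list and the LRU,
 *     // and pool_cache_bytes/pool_cache_count were decremented accordingly.
 */
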
/*
 * Returns a pointer to type <type> taken from the pool <pool_type> if
 * available, otherwise returns NULL. No malloc() is attempted, and poisoning
 * is never performed. The purpose is to get the fastest possible allocation.
 */
static inline void *__pool_get_first(struct pool_head *pool)
{
        struct pool_free_list cmp, new;
        void *ret = __pool_get_from_cache(pool);

        if (ret)
                return ret;

        cmp.seq = pool->seq;
        __ha_barrier_load();

        cmp.free_list = pool->free_list;
        do {
                if (cmp.free_list == NULL)
                        return NULL;
                new.seq = cmp.seq + 1;
                __ha_barrier_load();
                new.free_list = *POOL_LINK(pool, cmp.free_list);
        } while (__ha_cas_dw((void *)&pool->free_list, (void *)&cmp, (void *)&new) == 0);

        HA_ATOMIC_ADD(&pool->used, 1);
#ifdef DEBUG_MEMORY_POOLS
        /* keep track of where the element was allocated from */
        *POOL_LINK(pool, cmp.free_list) = (void *)pool;
#endif
        return cmp.free_list;
}

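/* Note on the loop above: pool->free_list and pool->seq form a double word
 * (struct pool_free_list) updated atomically by __ha_cas_dw(). Bumping <seq>
 * on every successful pop is what protects against the ABA problem: if
 * another thread pops and re-pushes the same head between our read and our
 * CAS, the sequence number no longer matches and the CAS retries instead of
 * linking a stale next pointer. Sketch of one successful pop:
 *
 *     before: free_list -> A -> B -> C      seq = s
 *     after:  free_list -> B -> C           seq = s + 1   (A returned to caller)
 */
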
static inline void *pool_get_first(struct pool_head *pool)
{
        void *ret;

        ret = __pool_get_first(pool);
        return ret;
}

/*
 * Returns a pointer to type <type> taken from the pool <pool_type> or
 * dynamically allocated. In the first case, <pool_type> is updated to point to
 * the next element in the list. No memory poisoning is ever performed on the
 * returned area.
 */
static inline void *pool_alloc_dirty(struct pool_head *pool)
{
        void *p;

        if ((p = __pool_get_first(pool)) == NULL)
                p = __pool_refill_alloc(pool, 0);
        return p;
}

/*
 * Returns a pointer to type <type> taken from the pool <pool_type> or
 * dynamically allocated. In the first case, <pool_type> is updated to point to
 * the next element in the list. Memory poisoning is performed if enabled.
 */
static inline void *pool_alloc(struct pool_head *pool)
{
        void *p;

        p = pool_alloc_dirty(pool);
#ifdef DEBUG_MEMORY_POOLS
        if (p) {
                /* keep track of where the element was allocated from */
                *POOL_LINK(pool, p) = (void *)pool;
        }
#endif
        if (p && mem_poison_byte >= 0) {
                memset(p, mem_poison_byte, pool->size);
        }

        return p;
}

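/* Minimal usage sketch (the pool pointer and object type below are assumed to
 * have been set up elsewhere with create_pool(), as shown earlier):
 *
 *     struct connection *conn = pool_alloc(pool_head_connection);
 *
 *     if (!conn)
 *             return -1;                        // cache and malloc() both failed
 *     memset(conn, 0, sizeof(*conn));           // pool memory is never zeroed
 *     ...
 *     pool_free(pool_head_connection, conn);
 */
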
/* Locklessly add item <ptr> to pool <pool>, then update the pool used count.
 * Both the pool and the pointer must be valid. Use pool_free() for normal
 * operations.
 */
static inline void __pool_free(struct pool_head *pool, void *ptr)
{
        void **free_list = pool->free_list;

        do {
                *POOL_LINK(pool, ptr) = (void *)free_list;
                __ha_barrier_store();
        } while (!HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
        HA_ATOMIC_SUB(&pool->used, 1);
}

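/* The store barrier above ensures the object's link word is visible before
 * the object becomes the new list head. Effect of one call, sketched:
 *
 *     before: free_list -> B -> C
 *     after:  free_list -> ptr -> B -> C     (pool->used decremented by 1)
 */
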
/* frees an object to the local cache, possibly pushing oldest objects to the
 * global pool.
 */
void __pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t idx);
static inline void pool_put_to_cache(struct pool_head *pool, void *ptr)
{
        ssize_t idx = pool_get_index(pool);

        /* pool not in cache, or the cache is mostly full (more than 3/4 of
         * CONFIG_HAP_POOL_CACHE_SIZE is used) and this pool already holds
         * more than its share of the cached objects (at least 16 + 1/8 of
         * the total count).
         */
        if (idx < 0 ||
            (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 3 / 4 &&
             pool_cache[idx].count >= 16 + pool_cache_count / 8)) {
                __pool_free(pool, ptr);
                return;
        }
        __pool_put_to_cache(pool, ptr, idx);
}

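/* Worked example (the numbers are only illustrative): if the per-thread cache
 * is allowed CONFIG_HAP_POOL_CACHE_SIZE bytes and currently holds 800 objects
 * totalling more than 3/4 of that budget, then a pool releasing an object
 * keeps it locally only while it holds fewer than 16 + 800/8 = 116 cached
 * objects; past that point the object goes to the shared free list via
 * __pool_free() instead.
 */
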
/*
 * Puts a memory area back to the corresponding pool.
 * Items are chained directly through a pointer that
 * is written in the beginning of the memory area, so
 * there's no need for any carrier cell. This implies
 * that each memory area is at least as big as one
 * pointer. Just like with the libc's free(), nothing
 * is done if <ptr> is NULL.
 */
static inline void pool_free(struct pool_head *pool, void *ptr)
{
        if (likely(ptr != NULL)) {
#ifdef DEBUG_MEMORY_POOLS
                /* we'll get late corruption if we refill to the wrong pool or double-free */
                if (*POOL_LINK(pool, ptr) != (void *)pool)
                        *(volatile int *)0 = 0;
#endif
                pool_put_to_cache(pool, ptr);
        }
}

#else /* CONFIG_HAP_LOCKLESS_POOLS */
/*
 * Returns a pointer to type <type> taken from the pool <pool_type> if
 * available, otherwise returns NULL. No malloc() is attempted, and poisoning
 * is never performed. The purpose is to get the fastest possible allocation.
 */
static inline void *__pool_get_first(struct pool_head *pool)
{
        void *p;

        if ((p = pool->free_list) != NULL) {
                pool->free_list = *POOL_LINK(pool, p);
                pool->used++;
#ifdef DEBUG_MEMORY_POOLS
                /* keep track of where the element was allocated from */
                *POOL_LINK(pool, p) = (void *)pool;
#endif
        }
        return p;
}

static inline void *pool_get_first(struct pool_head *pool)
{
        void *ret;

        HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
        ret = __pool_get_first(pool);
        HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
        return ret;
}

/*
 * Returns a pointer to type <type> taken from the pool <pool_type> or
 * dynamically allocated. In the first case, <pool_type> is updated to point to
 * the next element in the list. No memory poisoning is ever performed on the
 * returned area.
 */
static inline void *pool_alloc_dirty(struct pool_head *pool)
{
        void *p;

        HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
        if ((p = __pool_get_first(pool)) == NULL)
                p = __pool_refill_alloc(pool, 0);
        HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
        return p;
}

#ifndef DEBUG_UAF /* normal allocator */

/* allocates an area of size <size> and returns it. The semantics are similar
 * to those of malloc().
 */
static inline void *pool_alloc_area(size_t size)
{
        return malloc(size);
}

/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
 * semantics are identical to free() except that the size is specified and
 * may be ignored.
 */
static inline void pool_free_area(void *area, size_t __maybe_unused size)
{
        free(area);
}

#else /* use-after-free detector */

/* allocates an area of size <size> and returns it. The semantics are similar
 * to those of malloc(). However the allocation is rounded up to 4kB so that a
 * full page is allocated. This ensures the object can be freed alone so that
 * future dereferences are easily detected. The returned object is always
 * 16-byte aligned to avoid issues with unaligned structure objects. In case
 * some padding is added, the area's start address is copied at the end of the
 * padding to help detect underflows.
 */
#include <errno.h>
static inline void *pool_alloc_area(size_t size)
{
        size_t pad = (4096 - size) & 0xFF0;
        void *ret;

        ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        if (ret == MAP_FAILED)
                return NULL;
        if (pad >= sizeof(void *))
                *(void **)(ret + pad - sizeof(void *)) = ret + pad;
        return ret + pad;
}

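/* Worked example (assuming a 4096-byte page): for an object of size 100,
 * pad = (4096 - 100) & 0xFF0 = 3984, so mmap() maps one full page and the
 * caller gets page + 3984, i.e. the last 112 bytes of it, 16-byte aligned.
 * The returned address is also written into the word immediately preceding
 * it, which is what pool_free_area() checks to detect underflows.
 */
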
/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
 * semantics are identical to free() except that the size must absolutely match
 * the one passed to pool_alloc_area(). In case some padding is added, the
 * area's start address is compared to the one at the end of the padding, and
 * a segfault is triggered if they don't match, indicating an underflow.
 */
static inline void pool_free_area(void *area, size_t size)
{
        size_t pad = (4096 - size) & 0xFF0;

        if (pad >= sizeof(void *) && *(void **)(area - sizeof(void *)) != area)
                *(volatile int *)0 = 0;

        munmap(area - pad, (size + 4095) & -4096);
}

#endif /* DEBUG_UAF */

/*
 * Returns a pointer to type <type> taken from the pool <pool_type> or
 * dynamically allocated. In the first case, <pool_type> is updated to point to
 * the next element in the list. Memory poisoning is performed if enabled.
 */
static inline void *pool_alloc(struct pool_head *pool)
{
        void *p;

        p = pool_alloc_dirty(pool);
#ifdef DEBUG_MEMORY_POOLS
        if (p) {
                HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
                /* keep track of where the element was allocated from */
                *POOL_LINK(pool, p) = (void *)pool;
                HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
        }
#endif
        if (p && mem_poison_byte >= 0) {
                memset(p, mem_poison_byte, pool->size);
        }

        return p;
}

/*
 * Puts a memory area back to the corresponding pool.
 * Items are chained directly through a pointer that
 * is written in the beginning of the memory area, so
 * there's no need for any carrier cell. This implies
 * that each memory area is at least as big as one
 * pointer. Just like with the libc's free(), nothing
 * is done if <ptr> is NULL.
 */
static inline void pool_free(struct pool_head *pool, void *ptr)
{
        if (likely(ptr != NULL)) {
                HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
#ifdef DEBUG_MEMORY_POOLS
                /* we'll get late corruption if we refill to the wrong pool or double-free */
                if (*POOL_LINK(pool, ptr) != (void *)pool)
                        *(volatile int *)0 = 0;
#endif

#ifndef DEBUG_UAF /* normal pool behaviour */
                *POOL_LINK(pool, ptr) = (void *)pool->free_list;
                pool->free_list = (void *)ptr;
#else  /* release the entry for real to detect use after free */
                /* ensure we crash on double free or free of a const area */
                *(uint32_t *)ptr = 0xDEADADD4;
                pool_free_area(ptr, pool->size + POOL_EXTRA);
                pool->allocated--;
#endif /* DEBUG_UAF */
                pool->used--;
                HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
        }
}
#endif /* CONFIG_HAP_LOCKLESS_POOLS */
#endif /* _COMMON_MEMORY_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */