/*
 * Memory management functions.
 *
 * Copyright 2000-2007 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <sys/mman.h>
#include <errno.h>

#include <haproxy/activity.h>
#include <haproxy/api.h>
#include <haproxy/applet-t.h>
#include <haproxy/cfgparse.h>
#include <haproxy/channel.h>
#include <haproxy/cli.h>
#include <haproxy/errors.h>
#include <haproxy/global.h>
#include <haproxy/list.h>
#include <haproxy/pool.h>
#include <haproxy/stats-t.h>
#include <haproxy/stream_interface.h>
#include <haproxy/thread.h>
#include <haproxy/tools.h>


#ifdef CONFIG_HAP_POOLS
/* These ones are initialized per-thread on startup by init_pools() */
THREAD_LOCAL size_t pool_cache_bytes = 0; /* total cache size */
THREAD_LOCAL size_t pool_cache_count = 0; /* #cache objects   */
#endif

static struct list pools = LIST_HEAD_INIT(pools);
int mem_poison_byte = -1;

#ifdef DEBUG_FAIL_ALLOC
static int mem_fail_rate = 0;
#endif

static int using_default_allocator = 1;
static int (*my_mallctl)(const char *, void *, size_t *, void *, size_t) = NULL;

/* ask the allocator to trim memory pools.
 * This must run under thread isolation so that competing threads trying to
 * allocate or release memory do not prevent the allocator from completing
 * its job. We just have to be careful as callers might already be isolated
 * themselves.
 */
static void trim_all_pools(void)
{
	int isolated = thread_isolated();

	if (!isolated)
		thread_isolate();

	if (my_mallctl) {
		unsigned int i, narenas = 0;
		size_t len = sizeof(narenas);

		if (my_mallctl("arenas.narenas", &narenas, &len, NULL, 0) == 0) {
			for (i = 0; i < narenas; i ++) {
				char mib[32] = {0};
				snprintf(mib, sizeof(mib), "arena.%u.purge", i);
				(void)my_mallctl(mib, NULL, NULL, NULL, 0);
			}
		}
	} else {
#if defined(HA_HAVE_MALLOC_TRIM)
		if (using_default_allocator)
			malloc_trim(0);
#elif defined(HA_HAVE_MALLOC_ZONE)
		if (using_default_allocator) {
			vm_address_t *zones;
			unsigned int i, nzones;

			if (malloc_get_all_zones(0, NULL, &zones, &nzones) == KERN_SUCCESS) {
				for (i = 0; i < nzones; i ++) {
					malloc_zone_t *zone = (malloc_zone_t *)zones[i];

					/* we cannot purge anonymous zones */
					if (zone->zone_name)
						malloc_zone_pressure_relief(zone, 0);
				}
			}
		}
#endif
	}

	if (!isolated)
		thread_release();
}

/* check if we're using the same allocator as the one that provides
 * malloc_trim() and mallinfo(). The principle is that on glibc, both
 * malloc_trim() and mallinfo() are provided, and using mallinfo() we
 * can check if malloc() is performed through glibc or through any other
 * allocator the executable was linked against (e.g. jemalloc). Before
 * that, we check whether we're running on jemalloc by verifying whether
 * the mallctl() function is provided; its pointer will be used later.
 */
static void detect_allocator(void)
{
#if defined(__ELF__)
	extern int mallctl(const char *, void *, size_t *, void *, size_t) __attribute__((weak));

	my_mallctl = mallctl;
#endif

	if (!my_mallctl) {
		my_mallctl = get_sym_curr_addr("mallctl");
		using_default_allocator = (my_mallctl == NULL);
	}

	if (!my_mallctl) {
#if defined(HA_HAVE_MALLOC_TRIM)
#ifdef HA_HAVE_MALLINFO2
		struct mallinfo2 mi1, mi2;
#else
		struct mallinfo mi1, mi2;
#endif
		void *ptr;

#ifdef HA_HAVE_MALLINFO2
		mi1 = mallinfo2();
#else
		mi1 = mallinfo();
#endif
		ptr = DISGUISE(malloc(1));
#ifdef HA_HAVE_MALLINFO2
		mi2 = mallinfo2();
#else
		mi2 = mallinfo();
#endif
		free(DISGUISE(ptr));

		using_default_allocator = !!memcmp(&mi1, &mi2, sizeof(mi1));
#elif defined(HA_HAVE_MALLOC_ZONE)
		using_default_allocator = (malloc_default_zone() != NULL);
#endif
	}
}

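/* Illustrative sketch (not compiled): how the mallinfo() comparison above
 * detects a foreign allocator. With glibc's malloc, the accounting visible
 * through mallinfo() changes across an allocation:
 *
 *     mi1 = mallinfo();
 *     p   = malloc(1);   // served by glibc: mallinfo() counters move
 *     mi2 = mallinfo();
 *     // memcmp(&mi1, &mi2, sizeof(mi1)) != 0  => default allocator in use
 *
 * If malloc() was intercepted by another library (e.g. jemalloc), glibc's
 * counters stay untouched and the two snapshots compare equal.
 */
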
static int is_trim_enabled(void)
{
	return using_default_allocator;
}

/* Tries to find an existing shared pool with the same characteristics and
 * returns it, otherwise creates a new one. NULL is returned if no memory
 * is available for a new creation. Two flags are supported :
 *   - MEM_F_SHARED to indicate that the pool may be shared with other users
 *   - MEM_F_EXACT to indicate that the size must not be rounded up
 */
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
{
	struct pool_head *pool;
	struct pool_head *entry;
	struct list *start;
	unsigned int align;
	int thr __maybe_unused;

	/* We need to store a (void *) at the end of the chunks. Since we know
	 * that the malloc() function will never return such a small size,
	 * let's round the size up to something slightly bigger, in order to
	 * ease merging of entries. Note that the rounding is a power of two.
	 * This extra (void *) is not accounted for in the size computation
	 * so that the visible parts outside are not affected.
	 *
	 * Note: for the LRU cache, we need to store 2 doubly-linked lists.
	 */

	if (!(flags & MEM_F_EXACT)) {
		align = 4 * sizeof(void *); // 2 lists = 4 pointers min
		size  = ((size + POOL_EXTRA + align - 1) & -align) - POOL_EXTRA;
	}

#ifdef CONFIG_HAP_POOLS
	/* we'll store two lists there, we need the room for this. This is
	 * guaranteed by the test above, except if MEM_F_EXACT is set, or if
	 * the only EXTRA part is in fact the one that's stored in the cache
	 * in addition to the pci struct.
	 */
	if (size + POOL_EXTRA - POOL_EXTRA_CALLER < sizeof(struct pool_cache_item))
		size = sizeof(struct pool_cache_item) + POOL_EXTRA_CALLER - POOL_EXTRA;
#endif
	/* TODO: thread: we do not lock pool list for now because all pools are
	 * created during HAProxy startup (so before threads creation) */
	start = &pools;
	pool = NULL;

	list_for_each_entry(entry, &pools, list) {
		if (entry->size == size) {
			/* either we can share this place and we take it, or
			 * we look for a shareable one or for the next position
			 * before which we will insert a new one.
			 */
			if ((flags & entry->flags & MEM_F_SHARED)
#ifdef DEBUG_DONT_SHARE_POOLS
			    && strcmp(name, entry->name) == 0
#endif
			    ) {
				/* we can share this one */
				pool = entry;
				DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
				break;
			}
		}
		else if (entry->size > size) {
			/* insert before this one */
			start = &entry->list;
			break;
		}
	}

	if (!pool) {
		pool = calloc(1, sizeof(*pool));
		if (!pool)
			return NULL;
		if (name)
			strlcpy2(pool->name, name, sizeof(pool->name));
		pool->size = size;
		pool->flags = flags;
		LIST_APPEND(start, &pool->list);

#ifdef CONFIG_HAP_POOLS
		/* update per-thread pool cache if necessary */
		for (thr = 0; thr < MAX_THREADS; thr++) {
			LIST_INIT(&pool->cache[thr].list);
		}
#endif
	}
	pool->users++;
	return pool;
}

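/* Illustrative usage sketch (not compiled): a subsystem typically creates
 * its pool once at startup and allocates from it afterwards. The pool name
 * and struct below are hypothetical:
 *
 *     static struct pool_head *pool_head_foo;
 *
 *     pool_head_foo = create_pool("foo", sizeof(struct foo), MEM_F_SHARED);
 *     if (!pool_head_foo)
 *             // out of memory at startup
 *
 * With MEM_F_SHARED, a second create_pool() call with the same rounded size
 * would return the same pool_head and only bump its ->users count.
 */
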
/* Tries to allocate an object for the pool <pool> using the system's allocator
 * and directly returns it. The pool's allocated counter is checked and updated,
 * but no other checks are performed.
 */
void *pool_get_from_os(struct pool_head *pool)
{
	if (!pool->limit || pool->allocated < pool->limit) {
		void *ptr = pool_alloc_area(pool->size + POOL_EXTRA);
		if (ptr) {
			_HA_ATOMIC_INC(&pool->allocated);
			return ptr;
		}
		_HA_ATOMIC_INC(&pool->failed);
	}
	activity[tid].pool_fail++;
	return NULL;
}

/* Releases a pool item back to the operating system and atomically updates
 * the allocation counter.
 */
void pool_put_to_os(struct pool_head *pool, void *ptr)
{
#ifdef DEBUG_UAF
	/* This object will be released for real in order to detect a use after
	 * free. We also force a write to the area to ensure we crash on double
	 * free or free of a const area.
	 */
	*(uint32_t *)ptr = 0xDEADADD4;
#endif /* DEBUG_UAF */

	pool_free_area(ptr, pool->size + POOL_EXTRA);
	_HA_ATOMIC_DEC(&pool->allocated);
}

/* Tries to allocate an object for the pool <pool> using the system's allocator
 * and directly returns it. The pool's counters are updated but the object is
 * never cached, so this is usable with and without local or shared caches.
 */
void *pool_alloc_nocache(struct pool_head *pool)
{
	void *ptr = NULL;

	ptr = pool_get_from_os(pool);
	if (!ptr)
		return NULL;

	swrate_add_scaled(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used, POOL_AVG_SAMPLES/4);
	_HA_ATOMIC_INC(&pool->used);

	/* keep track of where the element was allocated from */
	POOL_DEBUG_SET_MARK(pool, ptr);
	POOL_DEBUG_TRACE_CALLER(pool, (struct pool_cache_item *)ptr, NULL);
	return ptr;
}

/* Releases a pool item back to the OS and keeps the pool's counters up to
 * date. This is always defined even when pools are not enabled (their usage
 * stats are maintained).
 */
void pool_free_nocache(struct pool_head *pool, void *ptr)
{
	_HA_ATOMIC_DEC(&pool->used);
	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
	pool_put_to_os(pool, ptr);
}


#ifdef CONFIG_HAP_POOLS

/* removes up to <count> items from the end of the local pool cache <ph> for
 * pool <pool>. The shared pool is refilled with these objects in the limit
 * of the number of acceptable objects, and the rest will be released to the
 * OS. It is not a problem if <count> is larger than the number of objects in
 * the local cache. The counters are automatically updated.
 */
static void pool_evict_last_items(struct pool_head *pool, struct pool_cache_head *ph, uint count)
{
	struct pool_cache_item *item;
	struct pool_item *pi, *head = NULL;
	uint released = 0;
	uint cluster = 0;
	uint to_free_max;

	to_free_max = pool_releasable(pool);

	while (released < count && !LIST_ISEMPTY(&ph->list)) {
		item = LIST_PREV(&ph->list, typeof(item), by_pool);
		pool_check_pattern(ph, item, pool->size);
		LIST_DELETE(&item->by_pool);
		LIST_DELETE(&item->by_lru);

		if (to_free_max > released || cluster) {
			pi = (struct pool_item *)item;
			pi->next = NULL;
			pi->down = head;
			head = pi;
			cluster++;
			if (cluster >= CONFIG_HAP_POOL_CLUSTER_SIZE) {
				/* enough to make a cluster */
				pool_put_to_shared_cache(pool, head, cluster);
				cluster = 0;
				head = NULL;
			}
		} else
			pool_free_nocache(pool, item);

		released++;
	}

	/* incomplete cluster left */
	if (cluster)
		pool_put_to_shared_cache(pool, head, cluster);

	ph->count -= released;
	pool_cache_count -= released;
	pool_cache_bytes -= released * pool->size;
}

/* Evicts some of the oldest objects from one local cache, until its number of
 * objects is no more than 16 plus 1/8 of the total number of locally cached
 * objects, or the total size of the local cache is no more than 75% of its
 * maximum (i.e. we don't want a single cache to use all the cache for itself).
 * For this, the list is scanned in reverse. If <full> is non-null, all objects
 * are evicted.
 */
void pool_evict_from_local_cache(struct pool_head *pool, int full)
{
	struct pool_cache_head *ph = &pool->cache[tid];

	while ((ph->count && full) ||
	       (ph->count >= CONFIG_HAP_POOL_CLUSTER_SIZE &&
	        ph->count >= 16 + pool_cache_count / 8 &&
	        pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 3 / 4)) {
		pool_evict_last_items(pool, ph, CONFIG_HAP_POOL_CLUSTER_SIZE);
	}
}

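/* Worked example (illustrative, assuming the default build settings of
 * CONFIG_HAP_POOL_CACHE_SIZE = 512 kB and CONFIG_HAP_POOL_CLUSTER_SIZE = 8):
 * with 4096 objects cached on this thread in total, a given pool's cache is
 * trimmed in batches of 8 only while it holds at least 16 + 4096/8 = 528
 * objects and the thread's whole cache exceeds 384 kB (3/4 of 512 kB).
 */
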
/* Evicts some of the oldest objects from the local cache, pushing them to the
 * global pool.
 */
void pool_evict_from_local_caches()
{
	struct pool_cache_item *item;
	struct pool_cache_head *ph;
	struct pool_head *pool;

	do {
		item = LIST_PREV(&th_ctx->pool_lru_head, struct pool_cache_item *, by_lru);
		/* note: by definition we remove oldest objects so they also are the
		 * oldest in their own pools, thus their next is the pool's head.
		 */
		ph = LIST_NEXT(&item->by_pool, struct pool_cache_head *, list);
		pool = container_of(ph - tid, struct pool_head, cache);
		pool_evict_last_items(pool, ph, CONFIG_HAP_POOL_CLUSTER_SIZE);
	} while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
}

/* Frees an object to the local cache, possibly pushing oldest objects to the
 * shared cache, which itself may decide to release some of them to the OS.
 * While it is unspecified what the object becomes past this point, it is
 * guaranteed to be released from the users' perspective. A caller address may
 * be passed and stored into the area when DEBUG_POOL_TRACING is set.
 */
void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller)
{
	struct pool_cache_item *item = (struct pool_cache_item *)ptr;
	struct pool_cache_head *ph = &pool->cache[tid];

	LIST_INSERT(&ph->list, &item->by_pool);
	LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
	POOL_DEBUG_TRACE_CALLER(pool, item, caller);
	ph->count++;
	pool_fill_pattern(ph, item, pool->size);
	pool_cache_count++;
	pool_cache_bytes += pool->size;

	if (unlikely(pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 3 / 4)) {
		if (ph->count >= 16 + pool_cache_count / 8 + CONFIG_HAP_POOL_CLUSTER_SIZE)
			pool_evict_from_local_cache(pool, 0);
		if (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE)
			pool_evict_from_local_caches();
	}
}

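/* Illustrative sketch (not compiled): the journey of a freed object. A caller
 * normally goes through the pool_free() wrapper, which ends up here:
 *
 *     pool_free(pool_head_foo, obj);      // pool_head_foo is hypothetical
 *         -> __pool_free()                // mark check + optional poisoning
 *             -> pool_put_to_cache()      // object enters the thread's cache
 *                 -> pool_evict_*()       // overflow goes to the shared cache
 *                     -> pool_put_to_os() // and possibly back to the OS
 */
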
#if defined(CONFIG_HAP_NO_GLOBAL_POOLS)

/* legacy stuff */
void pool_flush(struct pool_head *pool)
{
}

/* This function might ask the malloc library to trim its buffers. */
void pool_gc(struct pool_head *pool_ctx)
{
	trim_all_pools();
}

#else /* CONFIG_HAP_NO_GLOBAL_POOLS */

/* Tries to refill the local cache <pch> from the shared one for pool <pool>.
 * This is only used when pools are in use and shared pools are enabled. No
 * malloc() is attempted, and poisoning is never performed. The purpose is to
 * get the fastest possible refilling so that the caller can easily check if
 * the cache has enough objects for its use.
 */
void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch)
{
	struct pool_cache_item *item;
	struct pool_item *ret, *down;
	uint count;

	/* we'll need to reference the first element to figure the next one. We
	 * must temporarily lock it so that nobody allocates then releases it,
	 * or the dereference could fail.
	 */
	ret = _HA_ATOMIC_LOAD(&pool->free_list);
	do {
		while (unlikely(ret == POOL_BUSY)) {
			__ha_cpu_relax();
			ret = _HA_ATOMIC_LOAD(&pool->free_list);
		}
		if (ret == NULL)
			return;
	} while (unlikely((ret = _HA_ATOMIC_XCHG(&pool->free_list, POOL_BUSY)) == POOL_BUSY));

	if (unlikely(ret == NULL)) {
		HA_ATOMIC_STORE(&pool->free_list, NULL);
		return;
	}

	/* this releases the lock */
	HA_ATOMIC_STORE(&pool->free_list, ret->next);

	/* now store the retrieved object(s) into the local cache */
	count = 0;
	for (; ret; ret = down) {
		down = ret->down;
		/* keep track of where the element was allocated from */
		POOL_DEBUG_SET_MARK(pool, ret);

		item = (struct pool_cache_item *)ret;
		POOL_DEBUG_TRACE_CALLER(pool, item, NULL);
		LIST_INSERT(&pch->list, &item->by_pool);
		LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
		count++;
		pool_fill_pattern(pch, item, pool->size);
	}
	HA_ATOMIC_ADD(&pool->used, count);
	pch->count += count;
	pool_cache_count += count;
	pool_cache_bytes += count * pool->size;
}

/* Adds pool item cluster <item> to the shared cache, which contains <count>
 * elements. The caller is advised to first check using pool_releasable() if
 * it's wise to add this series of objects there. Both the pool and the item's
 * head must be valid.
 */
void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item, uint count)
{
	struct pool_item *free_list;

	_HA_ATOMIC_SUB(&pool->used, count);
	free_list = _HA_ATOMIC_LOAD(&pool->free_list);
	do {
		while (unlikely(free_list == POOL_BUSY)) {
			__ha_cpu_relax();
			free_list = _HA_ATOMIC_LOAD(&pool->free_list);
		}
		_HA_ATOMIC_STORE(&item->next, free_list);
		__ha_barrier_atomic_store();
	} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, item));
	__ha_barrier_atomic_store();
	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
}

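/* Layout sketch of the shared free_list manipulated above and in
 * pool_refill_local_from_shared()/pool_flush(): clusters of up to
 * CONFIG_HAP_POOL_CLUSTER_SIZE items are chained by ->next between cluster
 * heads, while ->down links the items inside a cluster:
 *
 *     free_list -> [head A] --next--> [head B] --next--> NULL
 *                     |                  |
 *                    down               down
 *                     v                  v
 *                  [item A1]          [item B1]
 *                     |                  |
 *                    down               down
 *                     v                  v
 *                  [item A2]            NULL
 *
 * The POOL_BUSY marker temporarily replaces free_list and acts as a spinlock
 * while the head is being dereferenced or swapped.
 */
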
/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	struct pool_item *next, *temp, *down;

	if (!pool)
		return;

	/* The loop below atomically detaches the head of the free list and
	 * replaces it with a NULL. Then the list can be released.
	 */
	next = pool->free_list;
	do {
		while (unlikely(next == POOL_BUSY)) {
			__ha_cpu_relax();
			next = _HA_ATOMIC_LOAD(&pool->free_list);
		}
		if (next == NULL)
			return;
	} while (unlikely((next = _HA_ATOMIC_XCHG(&pool->free_list, POOL_BUSY)) == POOL_BUSY));
	_HA_ATOMIC_STORE(&pool->free_list, NULL);
	__ha_barrier_atomic_store();

	while (next) {
		temp = next;
		next = temp->next;
		for (; temp; temp = down) {
			down = temp->down;
			pool_put_to_os(pool, temp);
		}
	}
	/* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It makes sure to be alone to
 * run by using thread_isolate(). <pool_ctx> is unused.
 */
void pool_gc(struct pool_head *pool_ctx)
{
	struct pool_head *entry;
	int isolated = thread_isolated();

	if (!isolated)
		thread_isolate();

	list_for_each_entry(entry, &pools, list) {
		struct pool_item *temp, *down;

		while (entry->free_list &&
		       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
			temp = entry->free_list;
			entry->free_list = temp->next;
			for (; temp; temp = down) {
				down = temp->down;
				pool_put_to_os(entry, temp);
			}
		}
	}

	trim_all_pools();

	if (!isolated)
		thread_release();
}
#endif /* CONFIG_HAP_NO_GLOBAL_POOLS */

#else  /* CONFIG_HAP_POOLS */

/* legacy stuff */
void pool_flush(struct pool_head *pool)
{
}

/* This function might ask the malloc library to trim its buffers. */
void pool_gc(struct pool_head *pool_ctx)
{
	trim_all_pools();
}

#endif /* CONFIG_HAP_POOLS */

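/* Recap of the three build configurations handled by the #ifdef maze above
 * (informative summary, derived from this file's structure):
 *   - CONFIG_HAP_POOLS with shared pools (default): per-thread caches backed
 *     by a lock-free shared free list; pool_flush()/pool_gc() walk that list;
 *   - CONFIG_HAP_POOLS + CONFIG_HAP_NO_GLOBAL_POOLS: per-thread caches only;
 *     pool_flush() is a no-op and pool_gc() merely trims the allocator;
 *   - without CONFIG_HAP_POOLS: every alloc/free goes straight to the OS.
 */
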
/*
 * Returns a pointer to an object taken from pool <pool>, either from the
 * cache or dynamically allocated. <flags> is a binary-OR of POOL_F_* flags.
 * Prefer using pool_alloc() which does the right thing without flags.
 */
void *__pool_alloc(struct pool_head *pool, unsigned int flags)
{
	void *p = NULL;
	void *caller = NULL;

#ifdef DEBUG_FAIL_ALLOC
	if (unlikely(!(flags & POOL_F_NO_FAIL) && mem_should_fail(pool)))
		return NULL;
#endif

#if defined(DEBUG_POOL_TRACING)
	caller = __builtin_return_address(0);
#endif
	if (!p)
		p = pool_get_from_cache(pool, caller);
	if (unlikely(!p))
		p = pool_alloc_nocache(pool);

	if (likely(p)) {
		if (unlikely(flags & POOL_F_MUST_ZERO))
			memset(p, 0, pool->size);
		else if (unlikely(!(flags & POOL_F_NO_POISON) && mem_poison_byte >= 0))
			memset(p, mem_poison_byte, pool->size);
	}
	return p;
}

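/* Illustrative usage sketch (not compiled): callers normally go through the
 * pool_alloc()/pool_zalloc() wrappers rather than calling this directly. The
 * pool and struct names are hypothetical:
 *
 *     struct foo *f = pool_alloc(pool_head_foo);   // flags = 0
 *     struct foo *z = pool_zalloc(pool_head_foo);  // POOL_F_MUST_ZERO set
 *
 *     if (f)
 *             // use the object, then hand it back:
 *             pool_free(pool_head_foo, f);
 */
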
/*
 * Puts a memory area back to the corresponding pool. <ptr> must be valid.
 * Using pool_free() is preferred.
 */
void __pool_free(struct pool_head *pool, void *ptr)
{
	const void *caller = NULL;

#if defined(DEBUG_POOL_TRACING)
	caller = __builtin_return_address(0);
#endif
	/* we'll get late corruption if we refill to the wrong pool or double-free */
	POOL_DEBUG_CHECK_MARK(pool, ptr);

	if (unlikely(mem_poison_byte >= 0))
		memset(ptr, mem_poison_byte, pool->size);

	pool_put_to_cache(pool, ptr, caller);
}


#ifdef DEBUG_UAF

/************* use-after-free allocator *************/

/* allocates an area of size <size> and returns it. The semantics are similar
 * to those of malloc(). However the allocation is rounded up to 4kB so that a
 * full page is allocated. This ensures the object can be freed alone so that
 * future dereferences are easily detected. The returned object is always
 * 16-bytes aligned to avoid issues with unaligned structure objects. In case
 * some padding is added, the area's start address is copied at the end of the
 * padding to help detect underflows.
 */
void *pool_alloc_area_uaf(size_t size)
{
	size_t pad = (4096 - size) & 0xFF0;
	void *ret;

	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (ret != MAP_FAILED) {
		/* let's dereference the page before returning so that the real
		 * allocation in the system is performed without holding the lock.
		 */
		*(int *)ret = 0;
		if (pad >= sizeof(void *))
			*(void **)(ret + pad - sizeof(void *)) = ret + pad;
		ret += pad;
	} else {
		ret = NULL;
	}
	return ret;
}

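/* Resulting page layout (informative, derived from the code above), e.g. for
 * size = 100: pad = (4096 - 100) & 0xFF0 = 3984, so within the mapped page:
 *
 *     mmap base                                          base + 4096
 *     |<------------- pad ------------->|<-- object (size) -->|
 *     [ padding ........ start address  ][ returned pointer .. ]
 *                        ^ last (void *) of the padding holds the
 *                          returned address, verified on free
 */
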
/* frees an area <area> of size <size> allocated by pool_alloc_area_uaf(). The
 * semantics are identical to free() except that the size must absolutely match
 * the one passed to pool_alloc_area_uaf(). In case some padding is added, the
 * area's start address is compared to the one at the end of the padding, and
 * a segfault is triggered if they don't match, indicating an underflow.
 */
void pool_free_area_uaf(void *area, size_t size)
{
	size_t pad = (4096 - size) & 0xFF0;

	if (pad >= sizeof(void *) && *(void **)(area - sizeof(void *)) != area)
		ABORT_NOW();

	munmap(area - pad, (size + 4095) & -4096);
}

#endif /* DEBUG_UAF */

/*
 * This function destroys a pool by freeing it completely, unless it's still
 * in use. This should be called only under extreme circumstances. It always
 * returns NULL if the resulting pool is empty, easing the clearing of the old
 * pointer, otherwise it returns the pool.
 */
void *pool_destroy(struct pool_head *pool)
{
	if (pool) {
#ifdef CONFIG_HAP_POOLS
		pool_evict_from_local_cache(pool, 1);
#endif
		pool_flush(pool);
		if (pool->used)
			return pool;
		pool->users--;
		if (!pool->users) {
			LIST_DELETE(&pool->list);
			/* note that if used == 0, the cache is empty */
			free(pool);
		}
	}
	return NULL;
}

/* This destroys all pools on exit. It is *not* thread safe. */
void pool_destroy_all()
{
	struct pool_head *entry, *back;

	list_for_each_entry_safe(entry, back, &pools, list)
		pool_destroy(entry);
}

/* This function dumps memory usage information into the trash buffer. */
void dump_pools_to_trash()
{
	struct pool_head *entry;
	unsigned long allocated, used;
	int nbpools;
#ifdef CONFIG_HAP_POOLS
	unsigned long cached_bytes = 0;
	uint cached = 0;
#endif

	allocated = used = nbpools = 0;
	chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
	list_for_each_entry(entry, &pools, list) {
#ifdef CONFIG_HAP_POOLS
		int i;
		for (cached = i = 0; i < global.nbthread; i++)
			cached += entry->cache[i].count;
		cached_bytes += cached * entry->size;
#endif
		chunk_appendf(&trash, "  - Pool %s (%u bytes) : %u allocated (%u bytes), %u used"
#ifdef CONFIG_HAP_POOLS
			      " (~%u by thread caches)"
#endif
			      ", needed_avg %u, %u failures, %u users, @%p%s\n",
			      entry->name, entry->size, entry->allocated,
			      entry->size * entry->allocated, entry->used,
#ifdef CONFIG_HAP_POOLS
			      cached,
#endif
			      swrate_avg(entry->needed_avg, POOL_AVG_SAMPLES), entry->failed,
			      entry->users, entry,
			      (entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");

		allocated += entry->allocated * entry->size;
		used += entry->used * entry->size;
		nbpools++;
	}
	chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used"
#ifdef CONFIG_HAP_POOLS
		      " (~%lu by thread caches)"
#endif
		      ".\n",
		      nbpools, allocated, used
#ifdef CONFIG_HAP_POOLS
		      , cached_bytes
#endif
		      );
}

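/* Example of the resulting "show pools" output (illustrative values only,
 * each pool entry is emitted on a single line, wrapped here for readability):
 *
 *     Dumping pools usage. Use SIGQUIT to flush them.
 *       - Pool buffer (16416 bytes) : 512 allocated (8404992 bytes), 480 used
 *           (~64 by thread caches), needed_avg 470, 0 failures, 3 users,
 *           @0x55d0c2a5f2c0 [SHARED]
 *     Total: 42 pools, 10485760 bytes allocated, 9437184 used
 *           (~1048576 by thread caches).
 */
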
/* Dump statistics on pools usage. */
void dump_pools(void)
{
	dump_pools_to_trash();
	qfprintf(stderr, "%s", trash.area);
}

/* This function returns the total number of failed pool allocations */
int pool_total_failures()
{
	struct pool_head *entry;
	int failed = 0;

	list_for_each_entry(entry, &pools, list)
		failed += entry->failed;
	return failed;
}

/* This function returns the total amount of memory allocated in pools (in bytes) */
unsigned long pool_total_allocated()
{
	struct pool_head *entry;
	unsigned long allocated = 0;

	list_for_each_entry(entry, &pools, list)
		allocated += entry->allocated * entry->size;
	return allocated;
}

/* This function returns the total amount of memory used in pools (in bytes) */
unsigned long pool_total_used()
{
	struct pool_head *entry;
	unsigned long used = 0;

	list_for_each_entry(entry, &pools, list)
		used += entry->used * entry->size;
	return used;
}

/* This function dumps memory usage information onto the stream interface's
 * read buffer. It returns 0 as long as it does not complete, non-zero upon
 * completion. No state is used.
 */
static int cli_io_handler_dump_pools(struct appctx *appctx)
{
	struct stream_interface *si = appctx->owner;

	dump_pools_to_trash();
	if (ci_putchk(si_ic(si), &trash) == -1) {
		si_rx_room_blk(si);
		return 0;
	}
	return 1;
}

/* callback used to create early pool <name> of size <size> and store the
 * resulting pointer into <ptr>. If the allocation fails, it quits after
 * emitting an error message.
 */
void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size)
{
	*ptr = create_pool(name, size, MEM_F_SHARED);
	if (!*ptr) {
		ha_alert("Failed to allocate pool '%s' of size %u : %s. Aborting.\n",
			 name, size, strerror(errno));
		exit(1);
	}
}

/* Initializes all per-thread arrays on startup */
static void init_pools()
{
#ifdef CONFIG_HAP_POOLS
	int thr;

	for (thr = 0; thr < MAX_THREADS; thr++) {
		LIST_INIT(&ha_thread_ctx[thr].pool_lru_head);
	}
#endif
	detect_allocator();
}

INITCALL0(STG_PREPARE, init_pools);

/* Report in build options if trim is supported */
static void pools_register_build_options(void)
{
	if (is_trim_enabled()) {
		char *ptr = NULL;
		memprintf(&ptr, "Support for malloc_trim() is enabled.");
		hap_register_build_opts(ptr, 1);
	}
}
INITCALL0(STG_REGISTER, pools_register_build_options);

/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{
	{ { "show", "pools", NULL }, "show pools : report information about the memory pools usage", NULL, cli_io_handler_dump_pools },
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);

#ifdef DEBUG_FAIL_ALLOC

int mem_should_fail(const struct pool_head *pool)
{
	int ret = 0;

	if (mem_fail_rate > 0 && !(global.mode & MODE_STARTING)) {
		if (mem_fail_rate > statistical_prng_range(100))
			ret = 1;
		else
			ret = 0;
	}
	return ret;
}

/* config parser for global "tune.fail-alloc" */
static int mem_parse_global_fail_alloc(char **args, int section_type, struct proxy *curpx,
                                       const struct proxy *defpx, const char *file, int line,
                                       char **err)
{
	if (too_many_args(1, args, err, NULL))
		return -1;
	mem_fail_rate = atoi(args[1]);
	if (mem_fail_rate < 0 || mem_fail_rate > 100) {
		memprintf(err, "'%s' expects a numeric value between 0 and 100.", args[0]);
		return -1;
	}
	return 0;
}
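
/* Example configuration snippet (illustrative) enabling random allocation
 * failures in a build compiled with -DDEBUG_FAIL_ALLOC, here with a 10%
 * failure rate:
 *
 *     global
 *         tune.fail-alloc 10
 */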
#endif

/* register global config keywords */
static struct cfg_kw_list mem_cfg_kws = {ILH, {
#ifdef DEBUG_FAIL_ALLOC
	{ CFG_GLOBAL, "tune.fail-alloc", mem_parse_global_fail_alloc },
#endif
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &mem_cfg_kws);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */