/*
 * Memory management functions.
 *
 * Copyright 2000-2007 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <errno.h>

#include <haproxy/activity-t.h>
#include <haproxy/api.h>
#include <haproxy/applet-t.h>
#include <haproxy/cfgparse.h>
#include <haproxy/channel.h>
#include <haproxy/cli.h>
#include <haproxy/errors.h>
#include <haproxy/global.h>
#include <haproxy/list.h>
#include <haproxy/pool.h>
#include <haproxy/stats-t.h>
#include <haproxy/stream_interface.h>
#include <haproxy/thread.h>
#include <haproxy/tools.h>


#ifdef CONFIG_HAP_POOLS
/* These ones are initialized per-thread on startup by init_pools() */
THREAD_LOCAL size_t pool_cache_bytes = 0;   /* total cache size  */
THREAD_LOCAL size_t pool_cache_count = 0;   /* #cache objects    */
#endif

static struct list pools = LIST_HEAD_INIT(pools);
int mem_poison_byte = -1;

#ifdef DEBUG_FAIL_ALLOC
static int mem_fail_rate = 0;
#endif

/* Tries to find an existing shared pool with the same characteristics and
 * returns it, otherwise creates a new one. NULL is returned if no memory
 * is available for a new creation. Two flags are supported:
 *   - MEM_F_SHARED to indicate that the pool may be shared with other users
 *   - MEM_F_EXACT to indicate that the size must not be rounded up
 */
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
{
        struct pool_head *pool;
        struct pool_head *entry;
        struct list *start;
        unsigned int align;
        int thr __maybe_unused;

        /* We need to store a (void *) at the end of the chunks. Since we know
         * that the malloc() function will never return such a small size,
         * let's round the size up to something slightly bigger, in order to
         * ease merging of entries. Note that the rounding is a power of two.
         * This extra (void *) is not accounted for in the size computation
         * so that the visible parts outside are not affected.
         *
         * Note: for the LRU cache, we need to store 2 doubly-linked lists.
         */

        if (!(flags & MEM_F_EXACT)) {
                align = 4 * sizeof(void *); // 2 lists = 4 pointers min
                size  = ((size + POOL_EXTRA + align - 1) & -align) - POOL_EXTRA;
        }

        /* TODO: thread: we do not lock pool list for now because all pools are
         * created during HAProxy startup (so before threads creation) */
        start = &pools;
        pool = NULL;

        list_for_each_entry(entry, &pools, list) {
                if (entry->size == size) {
                        /* either we can share this place and we take it, or
                         * we look for a shareable one or for the next position
                         * before which we will insert a new one.
                         */
                        if ((flags & entry->flags & MEM_F_SHARED)
#ifdef DEBUG_DONT_SHARE_POOLS
                            && strcmp(name, entry->name) == 0
#endif
                            ) {
                                /* we can share this one */
                                pool = entry;
                                DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
                                break;
                        }
                }
                else if (entry->size > size) {
                        /* insert before this one */
                        start = &entry->list;
                        break;
                }
        }

        if (!pool) {
                if (!pool)
                        pool = calloc(1, sizeof(*pool));

                if (!pool)
                        return NULL;
                if (name)
                        strlcpy2(pool->name, name, sizeof(pool->name));
                pool->size = size;
                pool->flags = flags;
                LIST_APPEND(start, &pool->list);

#ifdef CONFIG_HAP_POOLS
                /* update per-thread pool cache if necessary */
                for (thr = 0; thr < MAX_THREADS; thr++) {
                        LIST_INIT(&pool->cache[thr].list);
                }
#endif
                HA_SPIN_INIT(&pool->lock);
        }
        pool->users++;
        return pool;
}
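
/* Illustrative usage (a sketch, not from this file; the names below are only
 * examples): a shared pool is normally created once at startup and the
 * returned pointer is kept for later allocations, e.g.:
 *
 *     struct pool_head *pool_head_task;
 *     pool_head_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
 *     if (!pool_head_task)
 *             exit(1);
 *
 * create_pool_callback() further down wraps this exact pattern for early pools.
 */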

/* Tries to allocate an object for the pool <pool> using the system's allocator
 * and directly returns it. The pool's allocated counter is checked and updated,
 * but no other checks are performed. The pool's lock is never taken here, so
 * the function may safely be called with or without it held.
 */
void *pool_get_from_os(struct pool_head *pool)
{
        if (!pool->limit || pool->allocated < pool->limit) {
                void *ptr = pool_alloc_area(pool->size + POOL_EXTRA);
                if (ptr) {
                        _HA_ATOMIC_INC(&pool->allocated);
                        return ptr;
                }
                _HA_ATOMIC_INC(&pool->failed);
        }
        activity[tid].pool_fail++;
        return NULL;
}

/* Releases a pool item back to the operating system and atomically updates
 * the allocation counter.
 */
void pool_put_to_os(struct pool_head *pool, void *ptr)
{
        pool_free_area(ptr, pool->size + POOL_EXTRA);
        _HA_ATOMIC_DEC(&pool->allocated);
}

/* Tries to allocate an object for the pool <pool> using the system's allocator
 * and directly returns it. The pool's counters are updated but the object is
 * never cached, so this is usable with and without local or shared caches.
 * This may be called with or without the pool lock held, so it must not use
 * the pool's lock.
 */
void *pool_alloc_nocache(struct pool_head *pool)
{
        void *ptr = NULL;

        ptr = pool_get_from_os(pool);
        if (!ptr)
                return NULL;

        swrate_add_scaled(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used, POOL_AVG_SAMPLES/4);
        _HA_ATOMIC_INC(&pool->used);

#ifdef DEBUG_MEMORY_POOLS
        /* keep track of where the element was allocated from */
        *POOL_LINK(pool, ptr) = (void *)pool;
#endif
        return ptr;
}

/* Releases a pool item back to the OS and keeps the pool's counters up to
 * date. This is always defined even when pools are not enabled (their usage
 * stats are maintained).
 */
void pool_free_nocache(struct pool_head *pool, void *ptr)
{
        _HA_ATOMIC_DEC(&pool->used);
        swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
        pool_put_to_os(pool, ptr);
}
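
/* Sketch of the no-cache path described above (illustrative, not part of the
 * original file): an object is obtained from the OS and later handed back,
 * with the pool's used/allocated counters kept consistent by the two helpers:
 *
 *     void *obj = pool_alloc_nocache(pool);
 *     if (obj) {
 *             // ... use up to pool->size bytes ...
 *             pool_free_nocache(pool, obj);
 *     }
 */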


#ifdef CONFIG_HAP_POOLS

/* Evicts some of the oldest objects from one local cache, until its number of
 * objects is no more than 16 plus 1/8 of the total number of locally cached
 * objects, or until the total size of the local cache is no more than 75% of
 * its maximum (i.e. we don't want a single pool's cache to use the whole cache
 * for itself). For this, the list is scanned in reverse.
 */
void pool_evict_from_local_cache(struct pool_head *pool)
{
        struct pool_cache_head *ph = &pool->cache[tid];
        struct pool_cache_item *item;

        while (ph->count >= 16 + pool_cache_count / 8 &&
               pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 3 / 4) {
                item = LIST_NEXT(&ph->list, typeof(item), by_pool);
                ph->count--;
                pool_cache_bytes -= pool->size;
                pool_cache_count--;
                LIST_DELETE(&item->by_pool);
                LIST_DELETE(&item->by_lru);
                pool_put_to_shared_cache(pool, item);
        }
}
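
/* Worked example for the loop condition above (illustrative numbers): with
 * pool_cache_count == 1000 objects cached on this thread across all pools,
 * this pool's budget is 16 + 1000/8 = 141 objects; the loop keeps evicting
 * only while ph->count is at or above that budget AND the whole local cache
 * exceeds 3/4 of CONFIG_HAP_POOL_CACHE_SIZE. Both thresholds are re-evaluated
 * on every iteration, and eviction stops as soon as either one fails.
 */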

/* Evicts some of the oldest objects from the local cache, pushing them to the
 * global pool.
 */
void pool_evict_from_local_caches()
{
        struct pool_cache_item *item;
        struct pool_cache_head *ph;
        struct pool_head *pool;

        do {
                item = LIST_PREV(&ti->pool_lru_head, struct pool_cache_item *, by_lru);
                /* note: by definition we remove oldest objects so they also are the
                 * oldest in their own pools, thus their next is the pool's head.
                 */
                ph = LIST_NEXT(&item->by_pool, struct pool_cache_head *, list);
                pool = container_of(ph - tid, struct pool_head, cache);
                LIST_DELETE(&item->by_pool);
                LIST_DELETE(&item->by_lru);
                ph->count--;
                pool_cache_count--;
                pool_cache_bytes -= pool->size;
                pool_put_to_shared_cache(pool, item);
        } while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
}

/* Frees an object to the local cache, possibly pushing oldest objects to the
 * shared cache, which itself may decide to release some of them to the OS.
 * While it is unspecified what the object becomes past this point, it is
 * guaranteed to be released from the users' perspective.
 */
void pool_put_to_cache(struct pool_head *pool, void *ptr)
{
        struct pool_cache_item *item = (struct pool_cache_item *)ptr;
        struct pool_cache_head *ph = &pool->cache[tid];

        LIST_INSERT(&ph->list, &item->by_pool);
        LIST_INSERT(&ti->pool_lru_head, &item->by_lru);
        ph->count++;
        pool_cache_count++;
        pool_cache_bytes += pool->size;

        if (unlikely(pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 3 / 4)) {
                if (ph->count >= 16 + pool_cache_count / 8)
                        pool_evict_from_local_cache(pool);
                if (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE)
                        pool_evict_from_local_caches();
        }
}
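
/* Summary of the thresholds above: nothing is evicted while the local cache
 * stays at or below 3/4 of CONFIG_HAP_POOL_CACHE_SIZE. Past that point, this
 * pool's own cache is trimmed if it holds at least 16 + 1/8 of all locally
 * cached objects, and once the total exceeds CONFIG_HAP_POOL_CACHE_SIZE the
 * oldest objects of all pools are pushed out via pool_evict_from_local_caches().
 */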

#if defined(CONFIG_HAP_NO_GLOBAL_POOLS)

/* legacy stuff */
void pool_flush(struct pool_head *pool)
{
}

/* This function might ask the malloc library to trim its buffers. */
void pool_gc(struct pool_head *pool_ctx)
{
#if defined(HA_HAVE_MALLOC_TRIM)
        malloc_trim(0);
#endif
}

#elif defined(CONFIG_HAP_LOCKLESS_POOLS)

/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
        struct pool_free_list cmp, new;
        void **next, *temp;

        if (!pool)
                return;
        HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
        do {
                cmp.free_list = pool->free_list;
                cmp.seq = pool->seq;
                new.free_list = NULL;
                new.seq = cmp.seq + 1;
        } while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
        __ha_barrier_atomic_store();
        HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
        next = cmp.free_list;
        while (next) {
                temp = next;
                next = *POOL_LINK(pool, temp);
                pool_put_to_os(pool, temp);
        }
        pool->free_list = next;
        /* here, we should have pool->allocated == pool->used */
}
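
/* Note on the pattern above (descriptive comment added for clarity): the DWCAS
 * replaces the {free_list, seq} pair as a whole, so the complete free list is
 * detached atomically while the sequence number protects against ABA with
 * concurrent producers. The detached chain is then walked and released to the
 * OS outside of any atomic section.
 */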

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It makes sure to be alone to
 * run by using thread_isolate(). <pool_ctx> is unused.
 */
void pool_gc(struct pool_head *pool_ctx)
{
        struct pool_head *entry;
        int isolated = thread_isolated();

        if (!isolated)
                thread_isolate();

        list_for_each_entry(entry, &pools, list) {
                while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
                        struct pool_free_list cmp, new;

                        cmp.seq = entry->seq;
                        __ha_barrier_load();
                        cmp.free_list = entry->free_list;
                        __ha_barrier_load();
                        if (cmp.free_list == NULL)
                                break;
                        new.free_list = *POOL_LINK(entry, cmp.free_list);
                        new.seq = cmp.seq + 1;
                        if (HA_ATOMIC_DWCAS(&entry->free_list, &cmp, &new) == 0)
                                continue;
                        pool_put_to_os(entry, cmp.free_list);
                }
        }

        if (!isolated)
                thread_release();

#if defined(HA_HAVE_MALLOC_TRIM)
        malloc_trim(0);
#endif
}

#else /* CONFIG_HAP_LOCKLESS_POOLS */

/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
        void *temp;

        if (!pool)
                return;

        while (1) {
                HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
                temp = pool->free_list;
                if (!temp) {
                        HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
                        break;
                }
                pool->free_list = *POOL_LINK(pool, temp);
                HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
                pool_put_to_os(pool, temp);
        }
        /* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It makes sure to be alone to
 * run by using thread_isolate(). <pool_ctx> is unused.
 */
void pool_gc(struct pool_head *pool_ctx)
{
        struct pool_head *entry;
        int isolated = thread_isolated();

        if (!isolated)
                thread_isolate();

        list_for_each_entry(entry, &pools, list) {
                void *temp;
                //qfprintf(stderr, "Flushing pool %s\n", entry->name);
                while (entry->free_list &&
                       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
                        temp = entry->free_list;
                        entry->free_list = *POOL_LINK(entry, temp);
                        pool_put_to_os(entry, temp);
                }
        }

        if (!isolated)
                thread_release();
}
#endif /* CONFIG_HAP_LOCKLESS_POOLS */

#else /* CONFIG_HAP_POOLS */

/* legacy stuff */
void pool_flush(struct pool_head *pool)
{
}

/* This function might ask the malloc library to trim its buffers. */
void pool_gc(struct pool_head *pool_ctx)
{
#if defined(HA_HAVE_MALLOC_TRIM)
        malloc_trim(0);
#endif
}

#endif /* CONFIG_HAP_POOLS */

/*
 * This function destroys a pool by freeing it completely, unless it's still
 * in use. This should be called only under extreme circumstances. It always
 * returns NULL if the resulting pool is empty, easing the clearing of the old
 * pointer, otherwise it returns the pool.
 */
void *pool_destroy(struct pool_head *pool)
{
        if (pool) {
                pool_flush(pool);
                if (pool->used)
                        return pool;
                pool->users--;
                if (!pool->users) {
                        LIST_DELETE(&pool->list);
#ifndef CONFIG_HAP_LOCKLESS_POOLS
                        HA_SPIN_DESTROY(&pool->lock);
#endif
                        /* note that if used == 0, the cache is empty */
                        free(pool);
                }
        }
        return NULL;
}

/* This destroys all pools on exit. It is *not* thread safe. */
void pool_destroy_all()
{
        struct pool_head *entry, *back;

        list_for_each_entry_safe(entry, back, &pools, list)
                pool_destroy(entry);
}

/* This function dumps memory usage information into the trash buffer. */
void dump_pools_to_trash()
{
        struct pool_head *entry;
        unsigned long allocated, used;
        int nbpools;

        allocated = used = nbpools = 0;
        chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
        list_for_each_entry(entry, &pools, list) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
                HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
#endif
                chunk_appendf(&trash, "  - Pool %s (%u bytes) : %u allocated (%u bytes), %u used, needed_avg %u, %u failures, %u users, @%p%s\n",
                              entry->name, entry->size, entry->allocated,
                              entry->size * entry->allocated, entry->used,
                              swrate_avg(entry->needed_avg, POOL_AVG_SAMPLES), entry->failed,
                              entry->users, entry,
                              (entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");

                allocated += entry->allocated * entry->size;
                used += entry->used * entry->size;
                nbpools++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
                HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
#endif
        }
        chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
                      nbpools, allocated, used);
}
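
/* Example of the resulting dump (illustrative values only, pool name and
 * address are made up):
 *
 *   Dumping pools usage. Use SIGQUIT to flush them.
 *     - Pool task (160 bytes) : 61 allocated (9760 bytes), 55 used, needed_avg 54, 0 failures, 1 users, @0x55d0... [SHARED]
 *   Total: 1 pools, 9760 bytes allocated, 8800 used.
 */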

/* Dump statistics on pools usage. */
void dump_pools(void)
{
        dump_pools_to_trash();
        qfprintf(stderr, "%s", trash.area);
}

/* This function returns the total number of failed pool allocations */
int pool_total_failures()
{
        struct pool_head *entry;
        int failed = 0;

        list_for_each_entry(entry, &pools, list)
                failed += entry->failed;
        return failed;
}

/* This function returns the total amount of memory allocated in pools (in bytes) */
unsigned long pool_total_allocated()
{
        struct pool_head *entry;
        unsigned long allocated = 0;

        list_for_each_entry(entry, &pools, list)
                allocated += entry->allocated * entry->size;
        return allocated;
}

/* This function returns the total amount of memory used in pools (in bytes) */
unsigned long pool_total_used()
{
        struct pool_head *entry;
        unsigned long used = 0;

        list_for_each_entry(entry, &pools, list)
                used += entry->used * entry->size;
        return used;
}

/* This function dumps memory usage information onto the stream interface's
 * read buffer. It returns 0 as long as it does not complete, non-zero upon
 * completion. No state is used.
 */
static int cli_io_handler_dump_pools(struct appctx *appctx)
{
        struct stream_interface *si = appctx->owner;

        dump_pools_to_trash();
        if (ci_putchk(si_ic(si), &trash) == -1) {
                si_rx_room_blk(si);
                return 0;
        }
        return 1;
}
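
/* This handler backs the "show pools" CLI keyword registered below. As an
 * illustration (assuming a stats socket is configured; the socket path is
 * only an example), it can be queried with something like:
 *
 *     echo "show pools" | socat stdio /var/run/haproxy.sock
 */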

/* callback used to create early pool <name> of size <size> and store the
 * resulting pointer into <ptr>. If the allocation fails, it quits after
 * emitting an error message.
 */
void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size)
{
        *ptr = create_pool(name, size, MEM_F_SHARED);
        if (!*ptr) {
                ha_alert("Failed to allocate pool '%s' of size %u : %s. Aborting.\n",
                         name, size, strerror(errno));
                exit(1);
        }
}

/* Initializes all per-thread arrays on startup */
static void init_pools()
{
#ifdef CONFIG_HAP_POOLS
        int thr;

        for (thr = 0; thr < MAX_THREADS; thr++) {
                LIST_INIT(&ha_thread_info[thr].pool_lru_head);
        }
#endif
}

INITCALL0(STG_PREPARE, init_pools);

/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{
        { { "show", "pools",  NULL }, "show pools     : report information about the memory pools usage", NULL, cli_io_handler_dump_pools },
        {{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);

#ifdef DEBUG_FAIL_ALLOC

int mem_should_fail(const struct pool_head *pool)
{
        int ret = 0;

        if (mem_fail_rate > 0 && !(global.mode & MODE_STARTING)) {
                if (mem_fail_rate > statistical_prng_range(100))
                        ret = 1;
                else
                        ret = 0;
        }
        return ret;
}

/* config parser for global "tune.fail-alloc" */
static int mem_parse_global_fail_alloc(char **args, int section_type, struct proxy *curpx,
                                       const struct proxy *defpx, const char *file, int line,
                                       char **err)
{
        if (too_many_args(1, args, err, NULL))
                return -1;
        mem_fail_rate = atoi(args[1]);
        if (mem_fail_rate < 0 || mem_fail_rate > 100) {
                memprintf(err, "'%s' expects a numeric value between 0 and 100.", args[0]);
                return -1;
        }
        return 0;
}
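
/* Illustrative configuration (only meaningful in builds with DEBUG_FAIL_ALLOC):
 * make roughly 10% of pool allocations fail outside of the startup phase:
 *
 *     global
 *         tune.fail-alloc 10
 */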
#endif

/* register global config keywords */
static struct cfg_kw_list mem_cfg_kws = {ILH, {
#ifdef DEBUG_FAIL_ALLOC
        { CFG_GLOBAL, "tune.fail-alloc", mem_parse_global_fail_alloc },
#endif
        { 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &mem_cfg_kws);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */