/*
 * Memory management functions.
 *
 * Copyright 2000-2007 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <errno.h>

#include <haproxy/api.h>
#include <types/applet.h>
#include <types/cli.h>
#include <types/global.h>
#include <types/stats.h>

#include <common/cfgparse.h>
#include <common/hathreads.h>
#include <common/memory.h>
#include <haproxy/list.h>
#include <common/standard.h>

#include <types/activity.h>

#include <proto/applet.h>
#include <proto/cli.h>
#include <proto/channel.h>
#include <proto/log.h>
#include <proto/stream_interface.h>
#include <proto/stats.h>

/* These are the most common pools, expected to be initialized first. These
 * ones are allocated from an array, allowing to map them to an index.
 */
struct pool_head pool_base_start[MAX_BASE_POOLS] = { };
unsigned int pool_base_count = 0;

/* These ones are initialized per-thread on startup by init_pools() */
struct pool_cache_head pool_cache[MAX_THREADS][MAX_BASE_POOLS];
static struct list pool_lru_head[MAX_THREADS];   /* oldest objects   */
THREAD_LOCAL size_t pool_cache_bytes = 0;        /* total cache size */
THREAD_LOCAL size_t pool_cache_count = 0;        /* #cache objects   */
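/* Note: the per-thread cache above is indexed by the pool's position in
 * pool_base_start[]; pool_get_index() (see common/memory.h) returns that
 * offset for pools taken from the static array, and -1 for pools that had
 * to be calloc'ed, which therefore cannot use the thread-local cache.
 */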

static struct list pools = LIST_HEAD_INIT(pools);
int mem_poison_byte = -1;

#ifdef DEBUG_FAIL_ALLOC
static int mem_fail_rate = 0;
static int mem_should_fail(const struct pool_head *);
#endif

/* Tries to find an existing shared pool with the same characteristics and
 * returns it, otherwise creates this one. NULL is returned if no memory
 * is available for a new creation. Two flags are supported :
 *   - MEM_F_SHARED to indicate that the pool may be shared with other users
 *   - MEM_F_EXACT to indicate that the size must not be rounded up
 */
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
{
	struct pool_head *pool;
	struct pool_head *entry;
	struct list *start;
	unsigned int align;
	int thr, idx;

	/* We need to store a (void *) at the end of the chunks. Since we know
	 * that the malloc() function will never return such a small size,
	 * let's round the size up to something slightly bigger, in order to
	 * ease merging of entries. Note that the rounding is a power of two.
	 * This extra (void *) is not accounted for in the size computation
	 * so that the visible parts outside are not affected.
	 *
	 * Note: for the LRU cache, we need to store 2 doubly-linked lists.
	 */

	if (!(flags & MEM_F_EXACT)) {
		align = 4 * sizeof(void *); // 2 lists = 4 pointers min
		size  = ((size + POOL_EXTRA + align - 1) & -align) - POOL_EXTRA;
	}
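	/* Arithmetic illustration of the rounding above (numbers are only an
	 * example): with 64-bit pointers, align = 32; assuming POOL_EXTRA == 8,
	 * a request for 100 bytes becomes ((100 + 8 + 31) & -32) - 8 = 120, so
	 * that 120 + POOL_EXTRA = 128 is a multiple of 32. Pools of close sizes
	 * thus round to the same value and become mergeable.
	 */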

	/* TODO: thread: we do not lock pool list for now because all pools are
	 * created during HAProxy startup (so before threads creation) */
	start = &pools;
	pool = NULL;

	list_for_each_entry(entry, &pools, list) {
		if (entry->size == size) {
			/* either we can share this place and we take it, or
			 * we look for a sharable one or for the next position
			 * before which we will insert a new one.
			 */
			if (flags & entry->flags & MEM_F_SHARED) {
				/* we can share this one */
				pool = entry;
				DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
				break;
			}
		}
		else if (entry->size > size) {
			/* insert before this one */
			start = &entry->list;
			break;
		}
	}

	if (!pool) {
		if (pool_base_count < MAX_BASE_POOLS)
			pool = &pool_base_start[pool_base_count++];

		if (!pool) {
			/* look for a freed entry */
			for (entry = pool_base_start; entry != pool_base_start + MAX_BASE_POOLS; entry++) {
				if (!entry->size) {
					pool = entry;
					break;
				}
			}
		}

		if (!pool)
			pool = calloc(1, sizeof(*pool));

		if (!pool)
			return NULL;
		if (name)
			strlcpy2(pool->name, name, sizeof(pool->name));
		pool->size = size;
		pool->flags = flags;
		LIST_ADDQ(start, &pool->list);

		/* update per-thread pool cache if necessary */
		idx = pool_get_index(pool);
		if (idx >= 0) {
			for (thr = 0; thr < MAX_THREADS; thr++)
				pool_cache[thr][idx].size = size;
		}
		HA_SPIN_INIT(&pool->lock);
	}
	pool->users++;
	return pool;
}

#ifdef CONFIG_HAP_LOCKLESS_POOLS
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL, **free_list;
	int failed = 0;
	int size = pool->size;
	int limit = pool->limit;
	int allocated = pool->allocated, allocated_orig = allocated;

	/* stop point */
	avail += pool->used;

	while (1) {
		if (limit && allocated >= limit) {
			_HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
			activity[tid].pool_fail++;
			return NULL;
		}

		pool_avg_bump(&pool->needed_avg, pool->allocated);

		ptr = malloc(size + POOL_EXTRA);
		if (!ptr) {
			_HA_ATOMIC_ADD(&pool->failed, 1);
			if (failed) {
				activity[tid].pool_fail++;
				return NULL;
			}
			failed++;
			pool_gc(pool);
			continue;
		}
		if (++allocated > avail)
			break;

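		/* lock-free LIFO push: link the new chunk to the current head
		 * and try to swap the head in with a CAS; on failure
		 * <free_list> is refreshed with the observed head and the
		 * link is rewritten before retrying.
		 */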
		free_list = pool->free_list;
		do {
			*POOL_LINK(pool, ptr) = free_list;
			__ha_barrier_store();
		} while (_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr) == 0);
	}
	__ha_barrier_atomic_store();

	_HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
	_HA_ATOMIC_ADD(&pool->used, 1);

#ifdef DEBUG_MEMORY_POOLS
	/* keep track of where the element was allocated from */
	*POOL_LINK(pool, ptr) = (void *)pool;
#endif
	return ptr;
}
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	ptr = __pool_refill_alloc(pool, avail);
	return ptr;
}
/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	struct pool_free_list cmp, new;
	void **next, *temp;
	int removed = 0;

	if (!pool)
		return;
	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
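	/* detach the whole free list in one shot: the double-word CAS swaps
	 * the list head and the sequence counter together, the counter
	 * guarding against ABA reuse of the same head pointer by concurrent
	 * pushes and pops.
	 */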
	do {
		cmp.free_list = pool->free_list;
		cmp.seq = pool->seq;
		new.free_list = NULL;
		new.seq = cmp.seq + 1;
	} while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
	__ha_barrier_atomic_store();
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	next = cmp.free_list;
	while (next) {
		temp = next;
		next = *POOL_LINK(pool, temp);
		removed++;
		free(temp);
	}
	pool->free_list = next;
	_HA_ATOMIC_SUB(&pool->allocated, removed);
	/* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It makes sure to be alone to
 * run by using thread_isolate(). <pool_ctx> is unused.
 */
void pool_gc(struct pool_head *pool_ctx)
{
	struct pool_head *entry;
	int isolated = thread_isolated();

	if (!isolated)
		thread_isolate();

	list_for_each_entry(entry, &pools, list) {
		while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
			struct pool_free_list cmp, new;

			cmp.seq = entry->seq;
			__ha_barrier_load();
			cmp.free_list = entry->free_list;
			__ha_barrier_load();
			if (cmp.free_list == NULL)
				break;
			new.free_list = *POOL_LINK(entry, cmp.free_list);
			new.seq = cmp.seq + 1;
			if (HA_ATOMIC_DWCAS(&entry->free_list, &cmp, &new) == 0)
				continue;
			free(cmp.free_list);
			_HA_ATOMIC_SUB(&entry->allocated, 1);
		}
	}

	if (!isolated)
		thread_release();
}

/* frees an object to the local cache, possibly pushing oldest objects to the
 * global pool. Must not be called directly.
 */
void __pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t idx)
{
	struct pool_cache_item *item = (struct pool_cache_item *)ptr;
	struct pool_cache_head *ph = &pool_cache[tid][idx];

	LIST_ADD(&ph->list, &item->by_pool);
	LIST_ADD(&pool_lru_head[tid], &item->by_lru);
	ph->count++;
	pool_cache_count++;
	pool_cache_bytes += ph->size;

	if (pool_cache_bytes <= CONFIG_HAP_POOL_CACHE_SIZE)
		return;

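	/* the local cache grew past the limit: evict the globally oldest
	 * objects (tail of the per-thread LRU) until we are back under 7/8 of
	 * CONFIG_HAP_POOL_CACHE_SIZE, so that eviction does not have to run
	 * on every single free once the limit is reached (hysteresis).
	 */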
	do {
		item = LIST_PREV(&pool_lru_head[tid], struct pool_cache_item *, by_lru);
		/* note: by definition we remove oldest objects so they also are the
		 * oldest in their own pools, thus their next is the pool's head.
		 */
		ph = LIST_NEXT(&item->by_pool, struct pool_cache_head *, list);
		LIST_DEL(&item->by_pool);
		LIST_DEL(&item->by_lru);
		ph->count--;
		pool_cache_count--;
		pool_cache_bytes -= ph->size;
		__pool_free(pool_base_start + (ph - pool_cache[tid]), item);
	} while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
}

#else /* CONFIG_HAP_LOCKLESS_POOLS */

/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL;
	int failed = 0;

#ifdef DEBUG_FAIL_ALLOC
	if (mem_should_fail(pool))
		return NULL;
#endif
	/* stop point */
	avail += pool->used;

	while (1) {
		if (pool->limit && pool->allocated >= pool->limit) {
			activity[tid].pool_fail++;
			return NULL;
		}

		pool_avg_bump(&pool->needed_avg, pool->allocated);
		HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
		ptr = pool_alloc_area(pool->size + POOL_EXTRA);
#ifdef DEBUG_MEMORY_POOLS
		/* keep track of where the element was allocated from. This
		 * is done out of the lock so that the system really allocates
		 * the data without harming other threads waiting on the lock.
		 */
		if (ptr)
			*POOL_LINK(pool, ptr) = (void *)pool;
#endif
		HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
		if (!ptr) {
			pool->failed++;
			if (failed) {
				activity[tid].pool_fail++;
				return NULL;
			}
			failed++;
			pool_gc(pool);
			continue;
		}
		if (++pool->allocated > avail)
			break;

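		/* under the pool lock the free list is a plain LIFO: the
		 * object's hidden link word stores the previous list head.
		 */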
		*POOL_LINK(pool, ptr) = (void *)pool->free_list;
		pool->free_list = ptr;
	}
	pool->used++;
	return ptr;
}
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	ptr = __pool_refill_alloc(pool, avail);
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	return ptr;
}
/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	void *temp;

	if (!pool)
		return;

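	/* the lock only covers the list manipulation: it is taken and released
	 * around each object so that the pool_free_area() call itself runs
	 * unlocked and other threads are not blocked for the whole duration
	 * of the flush.
	 */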
	while (1) {
		HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
		temp = pool->free_list;
		if (!temp) {
			HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
			break;
		}
		pool->free_list = *POOL_LINK(pool, temp);
		pool->allocated--;
		HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
		pool_free_area(temp, pool->size + POOL_EXTRA);
	}
	/* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It makes sure to be alone to
 * run by using thread_isolate(). <pool_ctx> is unused.
 */
void pool_gc(struct pool_head *pool_ctx)
{
	struct pool_head *entry;
	int isolated = thread_isolated();

	if (!isolated)
		thread_isolate();

	list_for_each_entry(entry, &pools, list) {
		void *temp;
		//qfprintf(stderr, "Flushing pool %s\n", entry->name);
		while (entry->free_list &&
		       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
			temp = entry->free_list;
			entry->free_list = *POOL_LINK(entry, temp);
			entry->allocated--;
			pool_free_area(temp, entry->size + POOL_EXTRA);
		}
	}

	if (!isolated)
		thread_release();
}
#endif

/*
 * This function destroys a pool by freeing it completely, unless it's still
 * in use. This should be called only under extreme circumstances. It always
 * returns NULL if the resulting pool is empty, easing the clearing of the old
 * pointer, otherwise it returns the pool.
 */
void *pool_destroy(struct pool_head *pool)
{
	if (pool) {
		pool_flush(pool);
		if (pool->used)
			return pool;
		pool->users--;
		if (!pool->users) {
			LIST_DEL(&pool->list);
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_DESTROY(&pool->lock);
#endif
			if ((pool - pool_base_start) < MAX_BASE_POOLS)
				memset(pool, 0, sizeof(*pool));
			else
				free(pool);
		}
	}
	return NULL;
}

/* This destroys all pools on exit. It is *not* thread safe. */
void pool_destroy_all()
{
	struct pool_head *entry, *back;

	list_for_each_entry_safe(entry, back, &pools, list)
		pool_destroy(entry);
}

/* This function dumps memory usage information into the trash buffer. */
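/* Example of one resulting output line (values are purely illustrative):
 *   - Pool buffer (16416 bytes) : 512 allocated (8404992 bytes), 103 used,
 *     needed_avg 120, 0 failures, 1 users, @0x55d2c3a0=02 [SHARED]
 */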
void dump_pools_to_trash()
{
	struct pool_head *entry;
	unsigned long allocated, used;
	int nbpools;

	allocated = used = nbpools = 0;
	chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
	list_for_each_entry(entry, &pools, list) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
#endif
		chunk_appendf(&trash, "  - Pool %s (%u bytes) : %u allocated (%u bytes), %u used, needed_avg %u, %u failures, %u users, @%p=%02d%s\n",
			      entry->name, entry->size, entry->allocated,
			      entry->size * entry->allocated, entry->used,
			      pool_avg(entry->needed_avg), entry->failed,
			      entry->users, entry, (int)pool_get_index(entry),
			      (entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");

		allocated += entry->allocated * entry->size;
		used += entry->used * entry->size;
		nbpools++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
#endif
	}
	chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
		      nbpools, allocated, used);
}

/* Dump statistics on pools usage. */
void dump_pools(void)
{
	dump_pools_to_trash();
	qfprintf(stderr, "%s", trash.area);
}

/* This function returns the total number of failed pool allocations */
int pool_total_failures()
{
	struct pool_head *entry;
	int failed = 0;

	list_for_each_entry(entry, &pools, list)
		failed += entry->failed;
	return failed;
}

/* This function returns the total amount of memory allocated in pools (in bytes) */
unsigned long pool_total_allocated()
{
	struct pool_head *entry;
	unsigned long allocated = 0;

	list_for_each_entry(entry, &pools, list)
		allocated += entry->allocated * entry->size;
	return allocated;
}

/* This function returns the total amount of memory used in pools (in bytes) */
unsigned long pool_total_used()
{
	struct pool_head *entry;
	unsigned long used = 0;

	list_for_each_entry(entry, &pools, list)
		used += entry->used * entry->size;
	return used;
}

/* This function dumps memory usage information onto the stream interface's
 * read buffer. It returns 0 as long as it does not complete, non-zero upon
 * completion. No state is used.
 */
static int cli_io_handler_dump_pools(struct appctx *appctx)
{
	struct stream_interface *si = appctx->owner;

	dump_pools_to_trash();
	if (ci_putchk(si_ic(si), &trash) == -1) {
		si_rx_room_blk(si);
		return 0;
	}
	return 1;
}

/* callback used to create early pool <name> of size <size> and store the
 * resulting pointer into <ptr>. If the allocation fails, it quits after
 * emitting an error message.
 */
void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size)
{
	*ptr = create_pool(name, size, MEM_F_SHARED);
	if (!*ptr) {
		ha_alert("Failed to allocate pool '%s' of size %u : %s. Aborting.\n",
			 name, size, strerror(errno));
		exit(1);
	}
}
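
/* Note: this callback is normally not called directly; the REGISTER_POOL()
 * and DECLARE_POOL() macros (common/memory.h) queue it through an INITCALL
 * so that the pool is created early during startup. Illustrative use:
 *   DECLARE_POOL(pool_head_task, "task", sizeof(struct task));
 */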
573
Willy Tarreau7f0165e2018-11-26 17:09:46 +0100574/* Initializes all per-thread arrays on startup */
575static void init_pools()
576{
577 int thr, idx;
578
579 for (thr = 0; thr < MAX_THREADS; thr++) {
580 for (idx = 0; idx < MAX_BASE_POOLS; idx++) {
581 LIST_INIT(&pool_cache[thr][idx].list);
582 pool_cache[thr][idx].size = 0;
583 }
584 LIST_INIT(&pool_lru_head[thr]);
585 }
586}
587
588INITCALL0(STG_PREPARE, init_pools);
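/* STG_PREPARE runs very early in the startup sequence, so the per-thread
 * arrays above are ready before any pool gets created or used, and before
 * any thread is started.
 */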

/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{
	{ { "show", "pools",  NULL }, "show pools     : report information about the memory pools usage", NULL, cli_io_handler_dump_pools },
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);

#ifdef DEBUG_FAIL_ALLOC
#define MEM_FAIL_MAX_CHAR 32
#define MEM_FAIL_MAX_STR 128
static int mem_fail_cur_idx;
static char mem_fail_str[MEM_FAIL_MAX_CHAR * MEM_FAIL_MAX_STR];
__decl_hathreads(static HA_SPINLOCK_T mem_fail_lock);

int mem_should_fail(const struct pool_head *pool)
{
	int ret = 0;
	int n;

	if (mem_fail_rate > 0 && !(global.mode & MODE_STARTING)) {
		int randnb = ha_random() % 100;

		if (mem_fail_rate > randnb)
			ret = 1;
		else
			ret = 0;
	}
	HA_SPIN_LOCK(POOL_LOCK, &mem_fail_lock);
	n = snprintf(&mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR],
		     MEM_FAIL_MAX_CHAR - 2,
		     "%d %.18s %d %d", mem_fail_cur_idx, pool->name, ret, tid);
	while (n < MEM_FAIL_MAX_CHAR - 1)
		mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR + n++] = ' ';
	if (mem_fail_cur_idx < MEM_FAIL_MAX_STR - 1)
		mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR + n] = '\n';
	else
		mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR + n] = 0;
	mem_fail_cur_idx++;
	if (mem_fail_cur_idx == MEM_FAIL_MAX_STR)
		mem_fail_cur_idx = 0;
	HA_SPIN_UNLOCK(POOL_LOCK, &mem_fail_lock);
	return ret;
}

/* config parser for global "tune.fail-alloc" */
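/* Only available when built with -DDEBUG_FAIL_ALLOC. Illustrative usage in
 * the global section of the configuration:
 *     tune.fail-alloc 10   # make roughly 10% of pool allocations fail
 */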
static int mem_parse_global_fail_alloc(char **args, int section_type, struct proxy *curpx,
                                       struct proxy *defpx, const char *file, int line,
                                       char **err)
{
	if (too_many_args(1, args, err, NULL))
		return -1;
	mem_fail_rate = atoi(args[1]);
	if (mem_fail_rate < 0 || mem_fail_rate > 100) {
		memprintf(err, "'%s' expects a numeric value between 0 and 100.", args[0]);
		return -1;
	}
	return 0;
}
#endif

/* register global config keywords */
static struct cfg_kw_list mem_cfg_kws = {ILH, {
#ifdef DEBUG_FAIL_ALLOC
	{ CFG_GLOBAL, "tune.fail-alloc", mem_parse_global_fail_alloc },
#endif
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &mem_cfg_kws);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */