/*
 * Memory management functions.
 *
 * Copyright 2000-2007 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <errno.h>

#include <types/applet.h>
#include <types/cli.h>
#include <types/global.h>
#include <types/stats.h>

#include <common/config.h>
#include <common/debug.h>
#include <common/hathreads.h>
#include <common/initcall.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>

#include <proto/applet.h>
#include <proto/cli.h>
#include <proto/channel.h>
#include <proto/log.h>
#include <proto/stream_interface.h>
#include <proto/stats.h>

/* These are the most common pools, expected to be initialized first. These
 * ones are allocated from an array, allowing them to be mapped to an index.
 */
struct pool_head pool_base_start[MAX_BASE_POOLS] = { };
unsigned int pool_base_count = 0;

/* These ones are initialized per-thread on startup by init_pools() */
struct pool_cache_head pool_cache[MAX_THREADS][MAX_BASE_POOLS];
static struct list pool_lru_head[MAX_THREADS];   /* oldest objects   */
THREAD_LOCAL size_t pool_cache_bytes = 0;        /* total cache size */
THREAD_LOCAL size_t pool_cache_count = 0;        /* #cache objects   */

static struct list pools = LIST_HEAD_INIT(pools);
int mem_poison_byte = -1;

/* Tries to find an existing shared pool with the same characteristics and
 * returns it, otherwise creates a new one. NULL is returned if no memory
 * is available for a new creation. Two flags are supported:
 *   - MEM_F_SHARED to indicate that the pool may be shared with other users
 *   - MEM_F_EXACT to indicate that the size must not be rounded up
 */
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
{
	struct pool_head *pool;
	struct pool_head *entry;
	struct list *start;
	unsigned int align;

	/* We need to store a (void *) at the end of the chunks. Since we know
	 * that the malloc() function will never return such a small size,
	 * let's round the size up to something slightly bigger, in order to
	 * ease merging of entries. Note that the rounding is a power of two.
	 * This extra (void *) is not accounted for in the size computation
	 * so that the visible parts outside are not affected.
	 *
	 * Note: for the LRU cache, we need to store 2 doubly-linked lists.
	 */

	if (!(flags & MEM_F_EXACT)) {
		align = 4 * sizeof(void *); // 2 lists = 4 pointers min
		size  = ((size + POOL_EXTRA + align - 1) & -align) - POOL_EXTRA;
	}
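
	/* Worked example of the rounding above (illustrative; assumes a
	 * 64-bit build where POOL_EXTRA is 0, i.e. DEBUG_MEMORY_POOLS is
	 * not set):
	 *   align = 4 * sizeof(void *) = 32
	 *   requested size 33  -> ((33  + 0 + 31) & -32) - 0 = 64
	 *   requested size 160 -> ((160 + 0 + 31) & -32) - 0 = 160
	 * Thus pools requesting 33 and 64 bytes round to the same size and
	 * may be merged when MEM_F_SHARED is set.
	 */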

	/* TODO: thread: we do not lock pool list for now because all pools are
	 * created during HAProxy startup (so before threads creation) */
	start = &pools;
	pool = NULL;

	list_for_each_entry(entry, &pools, list) {
		if (entry->size == size) {
			/* either we can share this place and we take it, or
			 * we look for a sharable one or for the next position
			 * before which we will insert a new one.
			 */
			if (flags & entry->flags & MEM_F_SHARED) {
				/* we can share this one */
				pool = entry;
				DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
				break;
			}
		}
		else if (entry->size > size) {
			/* insert before this one */
			start = &entry->list;
			break;
		}
	}

	if (!pool) {
		if (pool_base_count < MAX_BASE_POOLS)
			pool = &pool_base_start[pool_base_count++];

		if (!pool) {
			/* look for a freed entry */
			for (entry = pool_base_start; entry != pool_base_start + MAX_BASE_POOLS; entry++) {
				if (!entry->size) {
					pool = entry;
					break;
				}
			}
		}

		if (!pool)
			pool = calloc(1, sizeof(*pool));

		if (!pool)
			return NULL;
		if (name)
			strlcpy2(pool->name, name, sizeof(pool->name));
		pool->size = size;
		pool->flags = flags;
		LIST_ADDQ(start, &pool->list);
	}
	pool->users++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_INIT(&pool->lock);
#endif
	return pool;
}
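
/* Usage sketch (illustrative, not part of this file): a subsystem typically
 * creates its pool once during startup and keeps the resulting pointer:
 *
 *     struct pool_head *pool_head_task;
 *     pool_head_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
 *
 * With MEM_F_SHARED, a later caller requesting a pool that rounds to the
 * same size receives this same pool_head with <users> incremented, instead
 * of a brand new pool.
 */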

#ifdef CONFIG_HAP_LOCKLESS_POOLS
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL, **free_list;
	int failed = 0;
	int size = pool->size;
	int limit = pool->limit;
	int allocated = pool->allocated, allocated_orig = allocated;

	/* stop point */
	avail += pool->used;

	while (1) {
		if (limit && allocated >= limit) {
			HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
			return NULL;
		}

		ptr = malloc(size + POOL_EXTRA);
		if (!ptr) {
			HA_ATOMIC_ADD(&pool->failed, 1);
			if (failed)
				return NULL;
			failed++;
			pool_gc(pool);
			continue;
		}
		if (++allocated > avail)
			break;

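		/* lock-free push of <ptr> onto the shared free list: link the
		 * node to the current head, then try to swing the head to
		 * <ptr>. On CAS failure, HA_ATOMIC_CAS refreshes <free_list>
		 * with the new head, so the link is rewritten before retrying.
		 */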
		free_list = pool->free_list;
		do {
			*POOL_LINK(pool, ptr) = free_list;
			__ha_barrier_store();
		} while (HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr) == 0);
	}

	HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
	HA_ATOMIC_ADD(&pool->used, 1);

#ifdef DEBUG_MEMORY_POOLS
	/* keep track of where the element was allocated from */
	*POOL_LINK(pool, ptr) = (void *)pool;
#endif
	return ptr;
}

void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	ptr = __pool_refill_alloc(pool, avail);
	return ptr;
}

/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	void **next, *temp;
	int removed = 0;

	if (!pool)
		return;
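	/* atomically detach the whole free list by swapping the head with
	 * NULL: once the CAS succeeds, concurrent allocators can no longer
	 * reach the detached nodes, so they can be freed without further
	 * synchronization.
	 */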
	do {
		next = pool->free_list;
	} while (!HA_ATOMIC_CAS(&pool->free_list, &next, NULL));
	while (next) {
		temp = next;
		next = *POOL_LINK(pool, temp);
		removed++;
		free(temp);
	}
	pool->free_list = next;
	HA_ATOMIC_SUB(&pool->allocated, removed);
	/* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It takes care of avoiding
 * recursion because it may be called from a signal handler.
 *
 * <pool_ctx> is unused
 */
void pool_gc(struct pool_head *pool_ctx)
{
	static int recurse;
	int cur_recurse = 0;
	struct pool_head *entry;

	if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
		return;

	list_for_each_entry(entry, &pools, list) {
		while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
			struct pool_free_list cmp, new;

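			/* snapshot the sequence counter and the list head
			 * together; the double-word CAS below only succeeds
			 * if neither changed, which protects against ABA
			 * reuse of a node freed and reallocated between the
			 * two reads.
			 */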
			cmp.seq = entry->seq;
			__ha_barrier_load();
			cmp.free_list = entry->free_list;
			__ha_barrier_load();
			if (cmp.free_list == NULL)
				break;
			new.free_list = *POOL_LINK(entry, cmp.free_list);
			new.seq = cmp.seq + 1;
			if (__ha_cas_dw(&entry->free_list, &cmp, &new) == 0)
				continue;
			free(cmp.free_list);
			HA_ATOMIC_SUB(&entry->allocated, 1);
		}
	}

	HA_ATOMIC_STORE(&recurse, 0);
}

/* frees an object to the local cache, possibly pushing oldest objects to the
 * global pool. Must not be called directly.
 */
void __pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t idx)
{
	struct pool_cache_item *item = (struct pool_cache_item *)ptr;
	struct pool_cache_head *ph = &pool_cache[tid][idx];

	LIST_ADD(&ph->list, &item->by_pool);
	LIST_ADD(&pool_lru_head[tid], &item->by_lru);
	ph->count++;
	pool_cache_count++;
	pool_cache_bytes += ph->size;

	if (pool_cache_bytes <= CONFIG_HAP_POOL_CACHE_SIZE)
		return;

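	/* the local cache exceeds its byte budget: evict the oldest objects
	 * (tail of the per-thread LRU) back to the shared pool until usage
	 * drops below 7/8 of CONFIG_HAP_POOL_CACHE_SIZE. The 7/8 threshold
	 * provides hysteresis so that a single free does not trigger an
	 * eviction cycle every time the budget is reached.
	 */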
	do {
		item = LIST_PREV(&pool_lru_head[tid], struct pool_cache_item *, by_lru);
		/* note: by definition we remove oldest objects so they also are the
		 * oldest in their own pools, thus their next is the pool's head.
		 */
		ph = LIST_NEXT(&item->by_pool, struct pool_cache_head *, list);
		LIST_DEL(&item->by_pool);
		LIST_DEL(&item->by_lru);
		ph->count--;
		pool_cache_count--;
		pool_cache_bytes -= ph->size;
		__pool_free(pool_base_start + (ph - pool_cache[tid]), item);
	} while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
}

#else /* CONFIG_HAP_LOCKLESS_POOLS */

/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL;
	int failed = 0;

	/* stop point */
	avail += pool->used;

	while (1) {
		if (pool->limit && pool->allocated >= pool->limit)
			return NULL;

		ptr = pool_alloc_area(pool->size + POOL_EXTRA);
		if (!ptr) {
			pool->failed++;
			if (failed)
				return NULL;
			failed++;
			pool_gc(pool);
			continue;
		}
		if (++pool->allocated > avail)
			break;

		*POOL_LINK(pool, ptr) = (void *)pool->free_list;
		pool->free_list = ptr;
	}
	pool->used++;
#ifdef DEBUG_MEMORY_POOLS
	/* keep track of where the element was allocated from */
	*POOL_LINK(pool, ptr) = (void *)pool;
#endif
	return ptr;
}

void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	ptr = __pool_refill_alloc(pool, avail);
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	return ptr;
}

/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	void *temp, *next;

	if (!pool)
		return;

	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	next = pool->free_list;
	while (next) {
		temp = next;
		next = *POOL_LINK(pool, temp);
		pool->allocated--;
		pool_free_area(temp, pool->size + POOL_EXTRA);
	}
	pool->free_list = next;
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	/* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It takes care of avoiding
 * recursion because it may be called from a signal handler.
 *
 * <pool_ctx> is used when pool_gc is called to release resources to allocate
 * an element in __pool_refill_alloc. It is important because <pool_ctx> is
 * already locked, so we need to skip the lock here.
 */
void pool_gc(struct pool_head *pool_ctx)
{
	static int recurse;
	int cur_recurse = 0;
	struct pool_head *entry;

	if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
		return;

	list_for_each_entry(entry, &pools, list) {
		void *temp, *next;
		//qfprintf(stderr, "Flushing pool %s\n", entry->name);
		if (entry != pool_ctx)
			HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
		next = entry->free_list;
		while (next &&
		       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
			temp = next;
			next = *POOL_LINK(entry, temp);
			entry->allocated--;
			pool_free_area(temp, entry->size + POOL_EXTRA);
		}
		entry->free_list = next;
		if (entry != pool_ctx)
			HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
	}

	HA_ATOMIC_STORE(&recurse, 0);
}
#endif /* CONFIG_HAP_LOCKLESS_POOLS */

/*
 * This function destroys a pool by freeing it completely, unless it's still
 * in use. This should be called only under extreme circumstances. It always
 * returns NULL if the resulting pool is empty, easing the clearing of the old
 * pointer, otherwise it returns the pool.
 */
void *pool_destroy(struct pool_head *pool)
{
	if (pool) {
		pool_flush(pool);
		if (pool->used)
			return pool;
		pool->users--;
		if (!pool->users) {
			LIST_DEL(&pool->list);
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_DESTROY(&pool->lock);
#endif
			if ((pool - pool_base_start) < MAX_BASE_POOLS)
				memset(pool, 0, sizeof(*pool));
			else
				free(pool);
		}
	}
	return NULL;
}
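
/* Usage sketch (illustrative): since pool_destroy() returns NULL once the
 * pool is really released, callers can drop and clear their pointer in a
 * single statement:
 *
 *     pool_head_task = pool_destroy(pool_head_task);
 */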

/* This destroys all pools on exit. It is *not* thread safe. */
void pool_destroy_all()
{
	struct pool_head *entry, *back;

	list_for_each_entry_safe(entry, back, &pools, list)
		pool_destroy(entry);
}

/* This function dumps memory usage information into the trash buffer. */
void dump_pools_to_trash()
{
	struct pool_head *entry;
	unsigned long allocated, used;
	int nbpools;

	allocated = used = nbpools = 0;
	chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
	list_for_each_entry(entry, &pools, list) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
#endif
		chunk_appendf(&trash, "  - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users, @%p=%02d%s\n",
			      entry->name, entry->size, entry->allocated,
			      entry->size * entry->allocated, entry->used, entry->failed,
			      entry->users, entry, (int)pool_get_index(entry),
			      (entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");

		allocated += entry->allocated * entry->size;
		used += entry->used * entry->size;
		nbpools++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
#endif
	}
	chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
		      nbpools, allocated, used);
}
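
/* Sample of the output produced above (illustrative values only):
 *
 *   Dumping pools usage. Use SIGQUIT to flush them.
 *     - Pool task (160 bytes) : 5 allocated (800 bytes), 2 used, 0 failures, 1 users, @0x88d240=03 [SHARED]
 *   Total: 1 pools, 800 bytes allocated, 320 used.
 */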

/* Dump statistics on pools usage. */
void dump_pools(void)
{
	dump_pools_to_trash();
	qfprintf(stderr, "%s", trash.area);
}

/* This function returns the total number of failed pool allocations */
int pool_total_failures()
{
	struct pool_head *entry;
	int failed = 0;

	list_for_each_entry(entry, &pools, list)
		failed += entry->failed;
	return failed;
}

/* This function returns the total amount of memory allocated in pools (in bytes) */
unsigned long pool_total_allocated()
{
	struct pool_head *entry;
	unsigned long allocated = 0;

	list_for_each_entry(entry, &pools, list)
		allocated += entry->allocated * entry->size;
	return allocated;
}

/* This function returns the total amount of memory used in pools (in bytes) */
unsigned long pool_total_used()
{
	struct pool_head *entry;
	unsigned long used = 0;

	list_for_each_entry(entry, &pools, list)
		used += entry->used * entry->size;
	return used;
}

/* This function dumps memory usage information onto the stream interface's
 * read buffer. It returns 0 as long as it does not complete, non-zero upon
 * completion. No state is used.
 */
static int cli_io_handler_dump_pools(struct appctx *appctx)
{
	struct stream_interface *si = appctx->owner;

	dump_pools_to_trash();
	if (ci_putchk(si_ic(si), &trash) == -1) {
		si_rx_room_blk(si);
		return 0;
	}
	return 1;
}

/* callback used to create early pool <name> of size <size> and store the
 * resulting pointer into <ptr>. If the allocation fails, it quits after
 * emitting an error message.
 */
void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size)
{
	*ptr = create_pool(name, size, MEM_F_SHARED);
	if (!*ptr) {
		ha_alert("Failed to allocate pool '%s' of size %u : %s. Aborting.\n",
			 name, size, strerror(errno));
		exit(1);
	}
}
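
/* Illustrative note: this callback is normally not invoked directly but via
 * a registration macro from common/memory.h (assuming the REGISTER_POOL()
 * macro defined there), e.g.:
 *
 *     struct pool_head *pool_head_connection;
 *     REGISTER_POOL(&pool_head_connection, "connection", sizeof(struct connection));
 *
 * which schedules create_pool_callback() to run during the STG_POOL initcall
 * stage at startup.
 */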

/* Initializes all per-thread arrays on startup */
static void init_pools()
{
	int thr, idx;

	for (thr = 0; thr < MAX_THREADS; thr++) {
		for (idx = 0; idx < MAX_BASE_POOLS; idx++) {
			LIST_INIT(&pool_cache[thr][idx].list);
			pool_cache[thr][idx].size = 0;
		}
		LIST_INIT(&pool_lru_head[thr]);
	}
}

INITCALL0(STG_PREPARE, init_pools);

/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{
	{ { "show", "pools",  NULL }, "show pools     : report information about the memory pools usage", NULL, cli_io_handler_dump_pools },
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
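
/* Illustrative use from a shell (assuming a stats socket is configured at
 * /var/run/haproxy.sock):
 *
 *     $ echo "show pools" | socat stdio /var/run/haproxy.sock
 *     Dumping pools usage. Use SIGQUIT to flush them.
 *       - Pool task (160 bytes) : ...
 *     Total: ...
 */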

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */