/*
 * Memory management functions.
 *
 * Copyright 2000-2007 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <types/applet.h>
#include <types/cli.h>
#include <types/global.h>
#include <types/stats.h>

#include <common/config.h>
#include <common/debug.h>
#include <common/hathreads.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>

#include <proto/applet.h>
#include <proto/cli.h>
#include <proto/channel.h>
#include <proto/log.h>
#include <proto/stream_interface.h>
#include <proto/stats.h>

/* These are the most common pools, expected to be initialized first. They
 * are allocated from an array, which allows mapping each of them to an index.
 */
struct pool_head pool_base_start[MAX_BASE_POOLS] = { };
unsigned int pool_base_count = 0;

THREAD_LOCAL struct pool_cache_head pool_cache[MAX_BASE_POOLS] = { };
THREAD_LOCAL struct list pool_lru_head = { };   /* oldest objects   */
THREAD_LOCAL size_t pool_cache_bytes = 0;       /* total cache size */
THREAD_LOCAL size_t pool_cache_count = 0;       /* #cache objects   */

static struct list pools = LIST_HEAD_INIT(pools);
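/* byte used to poison newly allocated objects when >= 0 (this may be set
 * with the -dM command-line option); -1 keeps poisoning disabled.
 */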
int mem_poison_byte = -1;

/* Tries to find an existing shared pool with the same characteristics and
 * returns it, otherwise creates a new one. NULL is returned if no memory
 * is available for a new creation. Two flags are supported:
 *   - MEM_F_SHARED to indicate that the pool may be shared with other users
 *   - MEM_F_EXACT to indicate that the size must not be rounded up
 */
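/* Illustrative use only (the pool and structure names below are examples,
 * not part of this file): subsystems typically create their pool once at
 * startup and keep the returned pointer around:
 *
 *     struct pool_head *pool_head_task;
 *     pool_head_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
 */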
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
{
	struct pool_head *pool;
	struct pool_head *entry;
	struct list *start;
	unsigned int align;

	/* We need to store a (void *) at the end of the chunks. Since we know
	 * that the malloc() function will never return such a small size,
	 * let's round the size up to something slightly bigger, in order to
	 * ease merging of entries. Note that the rounding is a power of two.
	 * This extra (void *) is not accounted for in the size computation
	 * so that the visible parts outside are not affected.
	 */

	if (!(flags & MEM_F_EXACT)) {
		align = 16;
		size  = ((size + POOL_EXTRA + align - 1) & -align) - POOL_EXTRA;
	}

	/* TODO: thread: we do not lock the pool list for now because all pools
	 * are created during HAProxy startup (so before threads are created) */
	start = &pools;
	pool = NULL;

	list_for_each_entry(entry, &pools, list) {
		if (entry->size == size) {
			/* either we can share this place and we take it, or
			 * we look for a sharable one or for the next position
			 * before which we will insert a new one.
			 */
			if (flags & entry->flags & MEM_F_SHARED) {
				/* we can share this one */
				pool = entry;
				DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
				break;
			}
		}
		else if (entry->size > size) {
			/* insert before this one */
			start = &entry->list;
			break;
		}
	}

	if (!pool) {
		if (pool_base_count < MAX_BASE_POOLS)
			pool = &pool_base_start[pool_base_count++];

		if (!pool) {
			/* look for a freed entry */
			for (entry = pool_base_start; entry != pool_base_start + MAX_BASE_POOLS; entry++) {
				if (!entry->size) {
					pool = entry;
					break;
				}
			}
		}

		if (!pool)
			pool = calloc(1, sizeof(*pool));

		if (!pool)
			return NULL;
		if (name)
			strlcpy2(pool->name, name, sizeof(pool->name));
		pool->size = size;
		pool->flags = flags;
		LIST_ADDQ(start, &pool->list);
	}
	pool->users++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_INIT(&pool->lock);
#endif
	return pool;
}

#ifdef CONFIG_HAP_LOCKLESS_POOLS
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL, **free_list;
	int failed = 0;
	int size = pool->size;
	int limit = pool->limit;
	int allocated = pool->allocated, allocated_orig = allocated;

	/* stop point */
	avail += pool->used;

	while (1) {
		if (limit && allocated >= limit) {
			HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
			return NULL;
		}

		ptr = malloc(size + POOL_EXTRA);
		if (!ptr) {
			HA_ATOMIC_ADD(&pool->failed, 1);
			if (failed)
				return NULL;
			failed++;
			pool_gc(pool);
			continue;
		}
		if (++allocated > avail)
			break;

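		/* lock-free LIFO push: link the new chunk to the current head,
		 * then swing the head with a CAS; on failure, HA_ATOMIC_CAS
		 * refreshes <free_list> with the new head, so we simply retry.
		 */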
		free_list = pool->free_list;
		do {
			*POOL_LINK(pool, ptr) = free_list;
			__ha_barrier_store();
		} while (HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr) == 0);
	}

	HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
	HA_ATOMIC_ADD(&pool->used, 1);

#ifdef DEBUG_MEMORY_POOLS
	/* keep track of where the element was allocated from */
	*POOL_LINK(pool, ptr) = (void *)pool;
#endif
	return ptr;
}

void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	ptr = __pool_refill_alloc(pool, avail);
	return ptr;
}

/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	void **next, *temp;
	int removed = 0;

	if (!pool)
		return;
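	/* atomically detach the whole free list so that concurrent users
	 * instantly see an empty pool while we release the chunks.
	 */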
	do {
		next = pool->free_list;
	} while (!HA_ATOMIC_CAS(&pool->free_list, &next, NULL));
	while (next) {
		temp = next;
		next = *POOL_LINK(pool, temp);
		removed++;
		free(temp);
	}
	pool->free_list = next;
	HA_ATOMIC_SUB(&pool->allocated, removed);
	/* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It takes care of avoiding
 * recursion because it may be called from a signal handler.
 *
 * <pool_ctx> is unused
 */
void pool_gc(struct pool_head *pool_ctx)
{
	static int recurse;
	int cur_recurse = 0;
	struct pool_head *entry;

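	/* the CAS below ensures that a single caller runs the GC at a time,
	 * and blocks re-entry if it is interrupted by a signal handler.
	 */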
227 if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
228 return;
229
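	/* pops below use a double-word CAS on a <free_list, seq> pair: the
	 * sequence number is bumped on each pop so that a head pointer which
	 * reappears after concurrent pops and pushes cannot be mistaken for
	 * an unchanged list (the classical ABA problem).
	 */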
	list_for_each_entry(entry, &pools, list) {
		while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
			struct pool_free_list cmp, new;

			cmp.seq = entry->seq;
			__ha_barrier_load();
			cmp.free_list = entry->free_list;
			__ha_barrier_load();
			if (cmp.free_list == NULL)
				break;
			new.free_list = *POOL_LINK(entry, cmp.free_list);
			new.seq = cmp.seq + 1;
			if (__ha_cas_dw(&entry->free_list, &cmp, &new) == 0)
				continue;
			free(cmp.free_list);
			HA_ATOMIC_SUB(&entry->allocated, 1);
		}
	}

	HA_ATOMIC_STORE(&recurse, 0);
}

/* frees an object to the local cache, possibly pushing oldest objects to the
 * global pool. Must not be called directly.
 */
void __pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t idx)
{
	struct pool_cache_item *item = (struct pool_cache_item *)ptr;
	struct pool_cache_head *ph = &pool_cache[idx];

	/* never allocated or empty */
	if (unlikely(ph->list.n == NULL)) {
		LIST_INIT(&ph->list);
		ph->size = pool->size;
		if (pool_lru_head.n == NULL)
			LIST_INIT(&pool_lru_head);
	}

	LIST_ADD(&ph->list, &item->by_pool);
	LIST_ADD(&pool_lru_head, &item->by_lru);
	ph->count++;
	pool_cache_count++;
	pool_cache_bytes += ph->size;

	if (pool_cache_bytes <= CONFIG_HAP_POOL_CACHE_SIZE)
		return;

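	/* the cache exceeds its configured size: evict the least recently
	 * used objects until it shrinks back to 7/8 of that limit, so that
	 * the eviction loop does not run again on every subsequent free.
	 */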
	do {
		item = LIST_PREV(&pool_lru_head, struct pool_cache_item *, by_lru);
		/* note: by definition we remove oldest objects so they also are the
		 * oldest in their own pools, thus their next is the pool's head.
		 */
		ph = LIST_NEXT(&item->by_pool, struct pool_cache_head *, list);
		LIST_DEL(&item->by_pool);
		LIST_DEL(&item->by_lru);
		ph->count--;
		pool_cache_count--;
		pool_cache_bytes -= ph->size;
		__pool_free(pool_base_start + (ph - pool_cache), item);
	} while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
}

#else /* CONFIG_HAP_LOCKLESS_POOLS */

/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL;
	int failed = 0;

	/* stop point */
	avail += pool->used;

	while (1) {
		if (pool->limit && pool->allocated >= pool->limit)
			return NULL;

		ptr = pool_alloc_area(pool->size + POOL_EXTRA);
		if (!ptr) {
			pool->failed++;
			if (failed)
				return NULL;
			failed++;
			pool_gc(pool);
			continue;
		}
		if (++pool->allocated > avail)
			break;

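		/* chain the new chunk at the head of the free list; the caller
		 * holds the pool's lock, so a plain store is sufficient here.
		 */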
		*POOL_LINK(pool, ptr) = (void *)pool->free_list;
		pool->free_list = ptr;
	}
	pool->used++;
#ifdef DEBUG_MEMORY_POOLS
	/* keep track of where the element was allocated from */
	*POOL_LINK(pool, ptr) = (void *)pool;
#endif
	return ptr;
}

void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	ptr = __pool_refill_alloc(pool, avail);
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	return ptr;
}

/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	void *temp, *next;

	if (!pool)
		return;

	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	next = pool->free_list;
	while (next) {
		temp = next;
		next = *POOL_LINK(pool, temp);
		pool->allocated--;
		pool_free_area(temp, pool->size + POOL_EXTRA);
	}
	pool->free_list = next;
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	/* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It takes care of avoiding
 * recursion because it may be called from a signal handler.
 *
 * <pool_ctx> is used when pool_gc is called to release resources to allocate
 * an element in __pool_refill_alloc. It is important because <pool_ctx> is
 * already locked, so we need to skip the lock here.
 */
void pool_gc(struct pool_head *pool_ctx)
{
	static int recurse;
	int cur_recurse = 0;
	struct pool_head *entry;

	if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
		return;

	list_for_each_entry(entry, &pools, list) {
		void *temp, *next;
		//qfprintf(stderr, "Flushing pool %s\n", entry->name);
		if (entry != pool_ctx)
			HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
		next = entry->free_list;
		while (next &&
		       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
			temp = next;
			next = *POOL_LINK(entry, temp);
			entry->allocated--;
			pool_free_area(temp, entry->size + POOL_EXTRA);
		}
		entry->free_list = next;
		if (entry != pool_ctx)
			HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
	}

	HA_ATOMIC_STORE(&recurse, 0);
}
#endif

/*
 * This function destroys a pool by freeing it completely, unless it's still
 * in use. This should be called only under extreme circumstances. It always
 * returns NULL if the resulting pool is empty, easing the clearing of the old
 * pointer, otherwise it returns the pool.
 */
void *pool_destroy(struct pool_head *pool)
{
	if (pool) {
		pool_flush(pool);
		if (pool->used)
			return pool;
		pool->users--;
		if (!pool->users) {
			LIST_DEL(&pool->list);
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_DESTROY(&pool->lock);
#endif
			if ((pool - pool_base_start) < MAX_BASE_POOLS)
				memset(pool, 0, sizeof(*pool));
			else
				free(pool);
		}
	}
	return NULL;
}
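
/* A minimal sketch of a pool's life cycle using the allocation helpers
 * declared in common/memory.h (the pool name and size below are purely
 * illustrative; pool_alloc()/pool_free() are assumed from that header):
 *
 *     struct pool_head *pool_head_buf = create_pool("buf", 1024, MEM_F_SHARED);
 *     void *obj = pool_alloc(pool_head_buf);        // NULL when memory is exhausted
 *     pool_free(pool_head_buf, obj);                // returns obj to the pool, not to free()
 *     pool_head_buf = pool_destroy(pool_head_buf);  // NULL once fully released
 */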

/* This function dumps memory usage information into the trash buffer. */
void dump_pools_to_trash()
{
	struct pool_head *entry;
	unsigned long allocated, used;
	int nbpools;

	allocated = used = nbpools = 0;
	chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
	list_for_each_entry(entry, &pools, list) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
#endif
		chunk_appendf(&trash, "  - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users, @%p=%02d%s\n",
			      entry->name, entry->size, entry->allocated,
			      entry->size * entry->allocated, entry->used, entry->failed,
			      entry->users, entry, (int)pool_get_index(entry),
			      (entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");

		allocated += entry->allocated * entry->size;
		used += entry->used * entry->size;
		nbpools++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
#endif
	}
	chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
		      nbpools, allocated, used);
}

/* Dump statistics on pools usage. */
void dump_pools(void)
{
	dump_pools_to_trash();
	qfprintf(stderr, "%s", trash.area);
}

/* This function returns the total number of failed pool allocations */
int pool_total_failures()
{
	struct pool_head *entry;
	int failed = 0;

	list_for_each_entry(entry, &pools, list)
		failed += entry->failed;
	return failed;
}

/* This function returns the total amount of memory allocated in pools (in bytes) */
unsigned long pool_total_allocated()
{
	struct pool_head *entry;
	unsigned long allocated = 0;

	list_for_each_entry(entry, &pools, list)
		allocated += entry->allocated * entry->size;
	return allocated;
}

/* This function returns the total amount of memory used in pools (in bytes) */
unsigned long pool_total_used()
{
	struct pool_head *entry;
	unsigned long used = 0;

	list_for_each_entry(entry, &pools, list)
		used += entry->used * entry->size;
	return used;
}

/* This function dumps memory usage information onto the stream interface's
 * read buffer. It returns 0 as long as it does not complete, non-zero upon
 * completion. No state is used.
 */
static int cli_io_handler_dump_pools(struct appctx *appctx)
{
	struct stream_interface *si = appctx->owner;

	dump_pools_to_trash();
	if (ci_putchk(si_ic(si), &trash) == -1) {
		si_applet_cant_put(si);
		return 0;
	}
	return 1;
}

/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{
	{ { "show", "pools",  NULL }, "show pools     : report information about the memory pools usage", NULL, cli_io_handler_dump_pools },
	{{},}
}};

__attribute__((constructor))
static void __memory_init(void)
{
	cli_register_kw(&cli_kws);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */