blob: c24339616b29ac15e12456f5c25a8a24ed84be10 [file] [log] [blame]
Willy Tarreau50e608d2007-05-13 18:26:08 +02001/*
2 * Memory management functions.
3 *
4 * Copyright 2000-2007 Willy Tarreau <w@1wt.eu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
William Lallemande7ed8852016-11-19 02:25:36 +010013#include <types/applet.h>
14#include <types/cli.h>
Willy Tarreau12833bb2014-01-28 16:49:56 +010015#include <types/global.h>
William Lallemande7ed8852016-11-19 02:25:36 +010016#include <types/stats.h>
17
Willy Tarreau50e608d2007-05-13 18:26:08 +020018#include <common/config.h>
Krzysztof Piotr Oledzkia643baf2008-05-29 23:53:44 +020019#include <common/debug.h>
Willy Tarreau50e608d2007-05-13 18:26:08 +020020#include <common/memory.h>
21#include <common/mini-clist.h>
22#include <common/standard.h>
23
William Lallemande7ed8852016-11-19 02:25:36 +010024#include <proto/applet.h>
25#include <proto/cli.h>
26#include <proto/channel.h>
Willy Tarreau50e608d2007-05-13 18:26:08 +020027#include <proto/log.h>
William Lallemande7ed8852016-11-19 02:25:36 +010028#include <proto/stream_interface.h>
29#include <proto/stats.h>
Willy Tarreau50e608d2007-05-13 18:26:08 +020030
/* These are the most common pools, expected to be initialized first. These
 * ones are allocated from an array, allowing to map them to an index.
 */
struct pool_head pool_base_start[MAX_BASE_POOLS] = { };
unsigned int pool_base_count = 0; /* number of slots already handed out from pool_base_start */

/* list of all registered pools, kept sorted by growing object size (see create_pool()) */
static struct list pools = LIST_HEAD_INIT(pools);
/* byte used to poison allocated areas; negative when not configured
 * (the value is set outside this file — TODO confirm against the config parser).
 */
int mem_poison_byte = -1;
40/* Try to find an existing shared pool with the same characteristics and
41 * returns it, otherwise creates this one. NULL is returned if no memory
Willy Tarreau581bf812016-01-25 02:19:13 +010042 * is available for a new creation. Two flags are supported :
43 * - MEM_F_SHARED to indicate that the pool may be shared with other users
44 * - MEM_F_EXACT to indicate that the size must not be rounded up
Willy Tarreau50e608d2007-05-13 18:26:08 +020045 */
46struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
47{
48 struct pool_head *pool;
Willy Tarreau7dcd46d2007-05-14 00:16:13 +020049 struct pool_head *entry;
50 struct list *start;
Willy Tarreau50e608d2007-05-13 18:26:08 +020051 unsigned int align;
52
Willy Tarreauac421112015-10-28 15:09:29 +010053 /* We need to store a (void *) at the end of the chunks. Since we know
Willy Tarreau50e608d2007-05-13 18:26:08 +020054 * that the malloc() function will never return such a small size,
55 * let's round the size up to something slightly bigger, in order to
56 * ease merging of entries. Note that the rounding is a power of two.
Willy Tarreauac421112015-10-28 15:09:29 +010057 * This extra (void *) is not accounted for in the size computation
58 * so that the visible parts outside are not affected.
Willy Tarreau50e608d2007-05-13 18:26:08 +020059 */
60
Willy Tarreau581bf812016-01-25 02:19:13 +010061 if (!(flags & MEM_F_EXACT)) {
62 align = 16;
63 size = ((size + POOL_EXTRA + align - 1) & -align) - POOL_EXTRA;
64 }
Willy Tarreau50e608d2007-05-13 18:26:08 +020065
Christopher Fauletb349e482017-08-29 09:52:38 +020066 /* TODO: thread: we do not lock pool list for now because all pools are
67 * created during HAProxy startup (so before threads creation) */
Willy Tarreau7dcd46d2007-05-14 00:16:13 +020068 start = &pools;
Willy Tarreau50e608d2007-05-13 18:26:08 +020069 pool = NULL;
Willy Tarreau7dcd46d2007-05-14 00:16:13 +020070
71 list_for_each_entry(entry, &pools, list) {
72 if (entry->size == size) {
73 /* either we can share this place and we take it, or
74 * we look for a sharable one or for the next position
75 * before which we will insert a new one.
76 */
77 if (flags & entry->flags & MEM_F_SHARED) {
78 /* we can share this one */
Willy Tarreau50e608d2007-05-13 18:26:08 +020079 pool = entry;
Krzysztof Piotr Oledzkia643baf2008-05-29 23:53:44 +020080 DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
Willy Tarreau50e608d2007-05-13 18:26:08 +020081 break;
82 }
83 }
Willy Tarreau7dcd46d2007-05-14 00:16:13 +020084 else if (entry->size > size) {
85 /* insert before this one */
86 start = &entry->list;
87 break;
88 }
Willy Tarreau50e608d2007-05-13 18:26:08 +020089 }
90
91 if (!pool) {
Willy Tarreau0a93b642018-10-16 07:58:39 +020092 if (pool_base_count < MAX_BASE_POOLS)
93 pool = &pool_base_start[pool_base_count++];
94
95 if (!pool) {
96 /* look for a freed entry */
97 for (entry = pool_base_start; entry != pool_base_start + MAX_BASE_POOLS; entry++) {
98 if (!entry->size) {
99 pool = entry;
100 break;
101 }
102 }
103 }
104
105 if (!pool)
106 pool = calloc(1, sizeof(*pool));
107
Willy Tarreau50e608d2007-05-13 18:26:08 +0200108 if (!pool)
109 return NULL;
110 if (name)
111 strlcpy2(pool->name, name, sizeof(pool->name));
112 pool->size = size;
113 pool->flags = flags;
Willy Tarreau7dcd46d2007-05-14 00:16:13 +0200114 LIST_ADDQ(start, &pool->list);
Willy Tarreau50e608d2007-05-13 18:26:08 +0200115 }
Willy Tarreau7dcd46d2007-05-14 00:16:13 +0200116 pool->users++;
Willy Tarreauf161d0f2018-02-22 14:05:55 +0100117#ifndef CONFIG_HAP_LOCKLESS_POOLS
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100118 HA_SPIN_INIT(&pool->lock);
Olivier Houchardcf975d42018-01-24 18:38:31 +0100119#endif
Willy Tarreau50e608d2007-05-13 18:26:08 +0200120 return pool;
121}
Olivier Houchardcf975d42018-01-24 18:38:31 +0100122
Willy Tarreauf161d0f2018-02-22 14:05:55 +0100123#ifdef CONFIG_HAP_LOCKLESS_POOLS
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL, *free_list;
	int failed = 0;
	/* work on local snapshots of the counters; the delta on <allocated> is
	 * published atomically on every exit path, limiting atomic ops in the loop.
	 */
	int size = pool->size;
	int limit = pool->limit;
	int allocated = pool->allocated, allocated_orig = allocated;

	/* stop point */
	avail += pool->used;

	while (1) {
		if (limit && allocated >= limit) {
			/* hard limit reached: publish what was allocated so far */
			HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
			return NULL;
		}

		ptr = malloc(size + POOL_EXTRA);
		if (!ptr) {
			HA_ATOMIC_ADD(&pool->failed, 1);
			if (failed)
				return NULL;
			failed++;
			/* try once to reclaim memory from other pools before giving up */
			pool_gc(pool);
			continue;
		}
		/* the last allocated entry (allocated > avail) is returned to the
		 * caller instead of being queued on the free list.
		 */
		if (++allocated > avail)
			break;

		/* lock-free push onto the free list; on CAS failure <free_list>
		 * holds the refreshed head, so the link word is rewritten before
		 * each retry and the store barrier orders it before the CAS.
		 */
		free_list = pool->free_list;
		do {
			*POOL_LINK(pool, ptr) = free_list;
			__ha_barrier_store();
		} while (HA_ATOMIC_CAS(&pool->free_list, (void *)&free_list, ptr) == 0);
	}

	HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
	HA_ATOMIC_ADD(&pool->used, 1);

#ifdef DEBUG_MEMORY_POOLS
	/* keep track of where the element was allocated from */
	*POOL_LINK(pool, ptr) = (void *)pool;
#endif
	return ptr;
}
/* Lockless variant of the refill entry point: no locking is needed around
 * __pool_refill_alloc(), so simply forward the call.
 */
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	return __pool_refill_alloc(pool, avail);
}
184/*
185 * This function frees whatever can be freed in pool <pool>.
186 */
187void pool_flush(struct pool_head *pool)
188{
189 void *next, *temp;
190 int removed = 0;
191
192 if (!pool)
193 return;
194 do {
195 next = pool->free_list;
196 } while (!HA_ATOMIC_CAS(&pool->free_list, (void *)&next, NULL));
197 while (next) {
198 temp = next;
199 next = *POOL_LINK(pool, temp);
200 removed++;
201 free(temp);
202 }
203 pool->free_list = next;
204 HA_ATOMIC_SUB(&pool->allocated, removed);
205 /* here, we should have pool->allocate == pool->used */
206}
207
/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It takes care of avoiding
 * recursion because it may be called from a signal handler.
 *
 * <pool_ctx> is unused
 */
void pool_gc(struct pool_head *pool_ctx)
{
	static int recurse;
	int cur_recurse = 0;
	struct pool_head *entry;

	/* single caller at a time and no re-entry (signal handler safety) */
	if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
		return;

	list_for_each_entry(entry, &pools, list) {
		/* release entries while more than <minavail> spare ones remain */
		while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
			struct pool_free_list cmp, new;

			/* pop the free-list head with a double-word CAS; <seq> is
			 * bumped on each pop, presumably to defeat ABA — TODO
			 * confirm against the definition of struct pool_free_list.
			 */
			cmp.seq = entry->seq;
			__ha_barrier_load();
			cmp.free_list = entry->free_list;
			__ha_barrier_load();
			if (cmp.free_list == NULL)
				break;
			new.free_list = *POOL_LINK(entry, cmp.free_list);
			new.seq = cmp.seq + 1;
			if (__ha_cas_dw(&entry->free_list, &cmp, &new) == 0)
				continue;
			free(cmp.free_list);
			HA_ATOMIC_SUB(&entry->allocated, 1);
		}
	}

	HA_ATOMIC_STORE(&recurse, 0);
}
Willy Tarreauf161d0f2018-02-22 14:05:55 +0100245#else /* CONFIG_HAP_LOCKLESS_POOLS */
Willy Tarreau50e608d2007-05-13 18:26:08 +0200246
Willy Tarreaua885f6d2014-12-03 15:25:28 +0100247/* Allocates new entries for pool <pool> until there are at least <avail> + 1
248 * available, then returns the last one for immediate use, so that at least
249 * <avail> are left available in the pool upon return. NULL is returned if the
250 * last entry could not be allocated. It's important to note that at least one
251 * allocation is always performed even if there are enough entries in the pool.
252 * A call to the garbage collector is performed at most once in case malloc()
253 * returns an error, before returning NULL.
Willy Tarreau50e608d2007-05-13 18:26:08 +0200254 */
Christopher Fauletb349e482017-08-29 09:52:38 +0200255void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
Willy Tarreau50e608d2007-05-13 18:26:08 +0200256{
Willy Tarreaua885f6d2014-12-03 15:25:28 +0100257 void *ptr = NULL;
258 int failed = 0;
Willy Tarreau50e608d2007-05-13 18:26:08 +0200259
Willy Tarreaua885f6d2014-12-03 15:25:28 +0100260 /* stop point */
261 avail += pool->used;
262
263 while (1) {
264 if (pool->limit && pool->allocated >= pool->limit)
Willy Tarreau7dcd46d2007-05-14 00:16:13 +0200265 return NULL;
Willy Tarreaua885f6d2014-12-03 15:25:28 +0100266
Willy Tarreauf13322e2017-11-22 10:50:54 +0100267 ptr = pool_alloc_area(pool->size + POOL_EXTRA);
Willy Tarreaua885f6d2014-12-03 15:25:28 +0100268 if (!ptr) {
Willy Tarreau58102cf2015-10-28 16:24:21 +0100269 pool->failed++;
Willy Tarreaua885f6d2014-12-03 15:25:28 +0100270 if (failed)
271 return NULL;
272 failed++;
Willy Tarreaubafbe012017-11-24 17:34:44 +0100273 pool_gc(pool);
Willy Tarreaua885f6d2014-12-03 15:25:28 +0100274 continue;
275 }
276 if (++pool->allocated > avail)
277 break;
278
Willy Tarreauac421112015-10-28 15:09:29 +0100279 *POOL_LINK(pool, ptr) = (void *)pool->free_list;
Willy Tarreaua885f6d2014-12-03 15:25:28 +0100280 pool->free_list = ptr;
Willy Tarreau7dcd46d2007-05-14 00:16:13 +0200281 }
Willy Tarreau50e608d2007-05-13 18:26:08 +0200282 pool->used++;
Willy Tarreaude30a682015-10-28 15:23:51 +0100283#ifdef DEBUG_MEMORY_POOLS
284 /* keep track of where the element was allocated from */
285 *POOL_LINK(pool, ptr) = (void *)pool;
286#endif
Willy Tarreaua885f6d2014-12-03 15:25:28 +0100287 return ptr;
Willy Tarreau50e608d2007-05-13 18:26:08 +0200288}
Christopher Fauletb349e482017-08-29 09:52:38 +0200289void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
290{
291 void *ptr;
Willy Tarreau50e608d2007-05-13 18:26:08 +0200292
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100293 HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
Christopher Fauletb349e482017-08-29 09:52:38 +0200294 ptr = __pool_refill_alloc(pool, avail);
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100295 HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
Christopher Fauletb349e482017-08-29 09:52:38 +0200296 return ptr;
297}
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200298/*
299 * This function frees whatever can be freed in pool <pool>.
300 */
Willy Tarreaubafbe012017-11-24 17:34:44 +0100301void pool_flush(struct pool_head *pool)
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200302{
303 void *temp, *next;
Willy Tarreau4d2d0982007-05-14 00:39:29 +0200304 if (!pool)
305 return;
306
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100307 HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200308 next = pool->free_list;
309 while (next) {
310 temp = next;
Willy Tarreauac421112015-10-28 15:09:29 +0100311 next = *POOL_LINK(pool, temp);
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200312 pool->allocated--;
Willy Tarreauf13322e2017-11-22 10:50:54 +0100313 pool_free_area(temp, pool->size + POOL_EXTRA);
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200314 }
315 pool->free_list = next;
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100316 HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200317 /* here, we should have pool->allocate == pool->used */
318}
319
320/*
321 * This function frees whatever can be freed in all pools, but respecting
Willy Tarreaub7f9d122009-04-21 02:17:45 +0200322 * the minimum thresholds imposed by owners. It takes care of avoiding
323 * recursion because it may be called from a signal handler.
Christopher Fauletb349e482017-08-29 09:52:38 +0200324 *
Willy Tarreaubafbe012017-11-24 17:34:44 +0100325 * <pool_ctx> is used when pool_gc is called to release resources to allocate
Christopher Fauletb349e482017-08-29 09:52:38 +0200326 * an element in __pool_refill_alloc. It is important because <pool_ctx> is
327 * already locked, so we need to skip the lock here.
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200328 */
Willy Tarreaubafbe012017-11-24 17:34:44 +0100329void pool_gc(struct pool_head *pool_ctx)
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200330{
Willy Tarreaub7f9d122009-04-21 02:17:45 +0200331 static int recurse;
Christopher Fauletb349e482017-08-29 09:52:38 +0200332 int cur_recurse = 0;
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200333 struct pool_head *entry;
Willy Tarreaub7f9d122009-04-21 02:17:45 +0200334
Christopher Fauletb349e482017-08-29 09:52:38 +0200335 if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
336 return;
Willy Tarreaub7f9d122009-04-21 02:17:45 +0200337
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200338 list_for_each_entry(entry, &pools, list) {
339 void *temp, *next;
340 //qfprintf(stderr, "Flushing pool %s\n", entry->name);
Christopher Fauletb349e482017-08-29 09:52:38 +0200341 if (entry != pool_ctx)
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100342 HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200343 next = entry->free_list;
344 while (next &&
Willy Tarreau57767b82014-12-22 21:40:55 +0100345 (int)(entry->allocated - entry->used) > (int)entry->minavail) {
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200346 temp = next;
Willy Tarreauac421112015-10-28 15:09:29 +0100347 next = *POOL_LINK(entry, temp);
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200348 entry->allocated--;
Willy Tarreauf13322e2017-11-22 10:50:54 +0100349 pool_free_area(temp, entry->size + POOL_EXTRA);
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200350 }
351 entry->free_list = next;
Christopher Fauletb349e482017-08-29 09:52:38 +0200352 if (entry != pool_ctx)
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100353 HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200354 }
Christopher Fauletb349e482017-08-29 09:52:38 +0200355
356 HA_ATOMIC_STORE(&recurse, 0);
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200357}
Olivier Houchardcf975d42018-01-24 18:38:31 +0100358#endif
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200359
360/*
Willy Tarreaudae4aa82007-06-16 23:19:53 +0200361 * This function destroys a pool by freeing it completely, unless it's still
362 * in use. This should be called only under extreme circumstances. It always
363 * returns NULL if the resulting pool is empty, easing the clearing of the old
364 * pointer, otherwise it returns the pool.
365 * .
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200366 */
Willy Tarreaubafbe012017-11-24 17:34:44 +0100367void *pool_destroy(struct pool_head *pool)
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200368{
Willy Tarreau4d2d0982007-05-14 00:39:29 +0200369 if (pool) {
Willy Tarreaubafbe012017-11-24 17:34:44 +0100370 pool_flush(pool);
Willy Tarreaudae4aa82007-06-16 23:19:53 +0200371 if (pool->used)
372 return pool;
373 pool->users--;
374 if (!pool->users) {
375 LIST_DEL(&pool->list);
Willy Tarreauf161d0f2018-02-22 14:05:55 +0100376#ifndef CONFIG_HAP_LOCKLESS_POOLS
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100377 HA_SPIN_DESTROY(&pool->lock);
Olivier Houchardcf975d42018-01-24 18:38:31 +0100378#endif
Willy Tarreau0a93b642018-10-16 07:58:39 +0200379 if ((pool - pool_base_start) < MAX_BASE_POOLS)
380 memset(pool, 0, sizeof(*pool));
381 else
382 free(pool);
Willy Tarreaudae4aa82007-06-16 23:19:53 +0200383 }
Willy Tarreau4d2d0982007-05-14 00:39:29 +0200384 }
385 return NULL;
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200386}
387
Willy Tarreau12833bb2014-01-28 16:49:56 +0100388/* This function dumps memory usage information into the trash buffer. */
389void dump_pools_to_trash()
Willy Tarreau50e608d2007-05-13 18:26:08 +0200390{
391 struct pool_head *entry;
392 unsigned long allocated, used;
393 int nbpools;
394
395 allocated = used = nbpools = 0;
Willy Tarreau12833bb2014-01-28 16:49:56 +0100396 chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
Willy Tarreau50e608d2007-05-13 18:26:08 +0200397 list_for_each_entry(entry, &pools, list) {
Willy Tarreauf161d0f2018-02-22 14:05:55 +0100398#ifndef CONFIG_HAP_LOCKLESS_POOLS
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100399 HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
Olivier Houchardcf975d42018-01-24 18:38:31 +0100400#endif
Willy Tarreau0a93b642018-10-16 07:58:39 +0200401 chunk_appendf(&trash, " - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users, @%p=%02d%s\n",
Willy Tarreaue6ce59d2007-05-13 19:38:49 +0200402 entry->name, entry->size, entry->allocated,
Willy Tarreau58102cf2015-10-28 16:24:21 +0100403 entry->size * entry->allocated, entry->used, entry->failed,
Willy Tarreau0a93b642018-10-16 07:58:39 +0200404 entry->users, entry, (int)pool_get_index(entry),
405 (entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");
Willy Tarreau50e608d2007-05-13 18:26:08 +0200406
407 allocated += entry->allocated * entry->size;
408 used += entry->used * entry->size;
409 nbpools++;
Willy Tarreauf161d0f2018-02-22 14:05:55 +0100410#ifndef CONFIG_HAP_LOCKLESS_POOLS
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100411 HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
Olivier Houchardcf975d42018-01-24 18:38:31 +0100412#endif
Willy Tarreau50e608d2007-05-13 18:26:08 +0200413 }
Willy Tarreau12833bb2014-01-28 16:49:56 +0100414 chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
Willy Tarreau50e608d2007-05-13 18:26:08 +0200415 nbpools, allocated, used);
416}
417
Willy Tarreau12833bb2014-01-28 16:49:56 +0100418/* Dump statistics on pools usage. */
419void dump_pools(void)
420{
421 dump_pools_to_trash();
Willy Tarreau843b7cb2018-07-13 10:54:26 +0200422 qfprintf(stderr, "%s", trash.area);
Willy Tarreau12833bb2014-01-28 16:49:56 +0100423}
424
Willy Tarreau58102cf2015-10-28 16:24:21 +0100425/* This function returns the total number of failed pool allocations */
426int pool_total_failures()
427{
428 struct pool_head *entry;
429 int failed = 0;
430
431 list_for_each_entry(entry, &pools, list)
432 failed += entry->failed;
433 return failed;
434}
435
436/* This function returns the total amount of memory allocated in pools (in bytes) */
437unsigned long pool_total_allocated()
438{
439 struct pool_head *entry;
440 unsigned long allocated = 0;
441
442 list_for_each_entry(entry, &pools, list)
443 allocated += entry->allocated * entry->size;
444 return allocated;
445}
446
447/* This function returns the total amount of memory used in pools (in bytes) */
448unsigned long pool_total_used()
449{
450 struct pool_head *entry;
451 unsigned long used = 0;
452
453 list_for_each_entry(entry, &pools, list)
454 used += entry->used * entry->size;
455 return used;
456}
457
William Lallemande7ed8852016-11-19 02:25:36 +0100458/* This function dumps memory usage information onto the stream interface's
459 * read buffer. It returns 0 as long as it does not complete, non-zero upon
460 * completion. No state is used.
461 */
462static int cli_io_handler_dump_pools(struct appctx *appctx)
463{
464 struct stream_interface *si = appctx->owner;
465
466 dump_pools_to_trash();
Willy Tarreau06d80a92017-10-19 14:32:15 +0200467 if (ci_putchk(si_ic(si), &trash) == -1) {
William Lallemande7ed8852016-11-19 02:25:36 +0100468 si_applet_cant_put(si);
469 return 0;
470 }
471 return 1;
472}
473
/* register cli keywords: adds "show pools", which reports per-pool memory
 * usage through cli_io_handler_dump_pools().
 */
static struct cli_kw_list cli_kws = {{ },{
	{ { "show", "pools", NULL }, "show pools : report information about the memory pools usage", NULL, cli_io_handler_dump_pools },
	{{},}
}};
479
/* Runs automatically before main() (constructor attribute): registers the
 * "show pools" CLI keywords above.
 */
__attribute__((constructor))
static void __memory_init(void)
{
	cli_register_kw(&cli_kws);
}
485
Willy Tarreau50e608d2007-05-13 18:26:08 +0200486/*
487 * Local variables:
488 * c-indent-level: 8
489 * c-basic-offset: 8
490 * End:
491 */