/*
 * Memory management functions.
 *
 * Copyright 2000-2007 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <types/applet.h>
#include <types/cli.h>
#include <types/global.h>
#include <types/stats.h>

#include <common/config.h>
#include <common/debug.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>

#include <proto/applet.h>
#include <proto/cli.h>
#include <proto/channel.h>
#include <proto/log.h>
#include <proto/stream_interface.h>
#include <proto/stats.h>

static struct list pools = LIST_HEAD_INIT(pools);
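/* byte used to poison freshly allocated areas when memory debugging is
 * enabled; -1 disables poisoning (it is normally set via the -dM command
 * line option)
 */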
int mem_poison_byte = -1;

/* Tries to find an existing shared pool with the same characteristics and
 * returns it, otherwise creates a new one. NULL is returned if no memory
 * is available for a new creation. Two flags are supported :
 * - MEM_F_SHARED to indicate that the pool may be shared with other users
 * - MEM_F_EXACT to indicate that the size must not be rounded up
 */
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
{
	struct pool_head *pool;
	struct pool_head *entry;
	struct list *start;
	unsigned int align;

	/* We need to store a (void *) at the end of the chunks. Since we know
	 * that the malloc() function will never return such a small size,
	 * let's round the size up to something slightly bigger, in order to
	 * ease merging of entries. Note that the rounding is a power of two.
	 * This extra (void *) is not accounted for in the size computation
	 * so that the visible parts outside are not affected.
	 */

	if (!(flags & MEM_F_EXACT)) {
		align = 16;
		size = ((size + POOL_EXTRA + align - 1) & -align) - POOL_EXTRA;
	}

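	/* Worked example of the rounding above (illustrative, assuming
	 * POOL_EXTRA is 8, i.e. a single 64-bit link pointer): a request
	 * for size 50 yields ((50 + 8 + 16 - 1) & -16) - 8 = 64 - 8 = 56,
	 * so the visible area plus the hidden link exactly fill 64 bytes.
	 */
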
	/* TODO: thread: we do not lock the pool list for now because all pools
	 * are created during HAProxy startup (so before thread creation) */
	start = &pools;
	pool = NULL;

	list_for_each_entry(entry, &pools, list) {
		if (entry->size == size) {
			/* either we can share this place and we take it, or
			 * we look for a sharable one or for the next position
			 * before which we will insert a new one.
			 */
			if (flags & entry->flags & MEM_F_SHARED) {
				/* we can share this one */
				pool = entry;
				DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
				break;
			}
		}
		else if (entry->size > size) {
			/* insert before this one */
			start = &entry->list;
			break;
		}
	}

	if (!pool) {
		pool = calloc(1, sizeof(*pool));
		if (!pool)
			return NULL;
		if (name)
			strlcpy2(pool->name, name, sizeof(pool->name));
		pool->size = size;
		pool->flags = flags;
		LIST_ADDQ(start, &pool->list);
	}
	pool->users++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_INIT(&pool->lock);
#endif
	return pool;
}
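
/* Usage sketch (illustrative only; the pool name and element type below are
 * hypothetical): a subsystem creates its pool once at startup and then
 * allocates fixed-size objects from it:
 *
 *     static struct pool_head *pool_head_task;
 *     pool_head_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
 */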

#ifdef CONFIG_HAP_LOCKLESS_POOLS
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL, *free_list;
	int failed = 0;
	int size = pool->size;
	int limit = pool->limit;
	int allocated = pool->allocated, allocated_orig = allocated;

	/* stop point */
	avail += pool->used;

	while (1) {
		if (limit && allocated >= limit) {
			HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
			return NULL;
		}

		ptr = malloc(size + POOL_EXTRA);
		if (!ptr) {
			HA_ATOMIC_ADD(&pool->failed, 1);
			if (failed)
				return NULL;
			failed++;
			pool_gc(pool);
			continue;
		}
		if (++allocated > avail)
			break;

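		/* lock-free push of the new entry onto the shared free list:
		 * link it to the current head, then try to CAS the head to
		 * this entry, retrying if another thread changed it meanwhile
		 */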
		free_list = pool->free_list;
		do {
			*POOL_LINK(pool, ptr) = free_list;
			__ha_barrier_store();
		} while (HA_ATOMIC_CAS(&pool->free_list, (void *)&free_list, ptr) == 0);
	}

	HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
	HA_ATOMIC_ADD(&pool->used, 1);

#ifdef DEBUG_MEMORY_POOLS
	/* keep track of where the element was allocated from */
	*POOL_LINK(pool, ptr) = (void *)pool;
#endif
	return ptr;
}

void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	ptr = __pool_refill_alloc(pool, avail);
	return ptr;
}

/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	void *next, *temp;
	int removed = 0;

	if (!pool)
		return;
	do {
		next = pool->free_list;
	} while (!HA_ATOMIC_CAS(&pool->free_list, (void *)&next, NULL));
	while (next) {
		temp = next;
		next = *POOL_LINK(pool, temp);
		removed++;
		free(temp);
	}
	pool->free_list = next;
	HA_ATOMIC_SUB(&pool->allocated, removed);
	/* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It takes care of avoiding
 * recursion because it may be called from a signal handler.
 *
 * <pool_ctx> is unused
 */
void pool_gc(struct pool_head *pool_ctx)
{
	static int recurse;
	int cur_recurse = 0;
	struct pool_head *entry;

	if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
		return;

	list_for_each_entry(entry, &pools, list) {
		while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
			struct pool_free_list cmp, new;

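			/* pop the current head using a double-word CAS: the
			 * <seq> counter is bumped on every removal so that a
			 * head recycled by another thread cannot be mistaken
			 * for the one we snapshotted (ABA protection)
			 */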
			cmp.seq = entry->seq;
			__ha_barrier_load();
			cmp.free_list = entry->free_list;
			__ha_barrier_load();
			if (cmp.free_list == NULL)
				break;
			new.free_list = *POOL_LINK(entry, cmp.free_list);
			new.seq = cmp.seq + 1;
			if (__ha_cas_dw(&entry->free_list, &cmp, &new) == 0)
				continue;
			free(cmp.free_list);
			HA_ATOMIC_SUB(&entry->allocated, 1);
		}
	}

	HA_ATOMIC_STORE(&recurse, 0);
}
#else /* CONFIG_HAP_LOCKLESS_POOLS */
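/* Fallback implementation of the same entry points when lockless pools are
 * not supported: a per-pool spinlock serializes access to the free list. */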

/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL. The caller is expected to hold the
 * pool's lock.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL;
	int failed = 0;

	/* stop point */
	avail += pool->used;

	while (1) {
		if (pool->limit && pool->allocated >= pool->limit)
			return NULL;

		ptr = pool_alloc_area(pool->size + POOL_EXTRA);
		if (!ptr) {
			pool->failed++;
			if (failed)
				return NULL;
			failed++;
			pool_gc(pool);
			continue;
		}
		if (++pool->allocated > avail)
			break;

		*POOL_LINK(pool, ptr) = (void *)pool->free_list;
		pool->free_list = ptr;
	}
	pool->used++;
#ifdef DEBUG_MEMORY_POOLS
	/* keep track of where the element was allocated from */
	*POOL_LINK(pool, ptr) = (void *)pool;
#endif
	return ptr;
}

void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	ptr = __pool_refill_alloc(pool, avail);
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	return ptr;
}

/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	void *temp, *next;

	if (!pool)
		return;

	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	next = pool->free_list;
	while (next) {
		temp = next;
		next = *POOL_LINK(pool, temp);
		pool->allocated--;
		pool_free_area(temp, pool->size + POOL_EXTRA);
	}
	pool->free_list = next;
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	/* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It takes care of avoiding
 * recursion because it may be called from a signal handler.
 *
 * <pool_ctx> is used when pool_gc is called to release resources to allocate
 * an element in __pool_refill_alloc. It is important because <pool_ctx> is
 * already locked, so we need to skip the lock here.
 */
void pool_gc(struct pool_head *pool_ctx)
{
	static int recurse;
	int cur_recurse = 0;
	struct pool_head *entry;

	if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
		return;

	list_for_each_entry(entry, &pools, list) {
		void *temp, *next;
		//qfprintf(stderr, "Flushing pool %s\n", entry->name);
		if (entry != pool_ctx)
			HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
		next = entry->free_list;
		while (next &&
		       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
			temp = next;
			next = *POOL_LINK(entry, temp);
			entry->allocated--;
			pool_free_area(temp, entry->size + POOL_EXTRA);
		}
		entry->free_list = next;
		if (entry != pool_ctx)
			HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
	}

	HA_ATOMIC_STORE(&recurse, 0);
}
#endif /* CONFIG_HAP_LOCKLESS_POOLS */

/*
 * This function destroys a pool by freeing it completely, unless it's still
 * in use. This should be called only under extreme circumstances. It always
 * returns NULL if the resulting pool is empty, easing the clearing of the old
 * pointer, otherwise it returns the pool.
 */
void *pool_destroy(struct pool_head *pool)
{
	if (pool) {
		pool_flush(pool);
		if (pool->used)
			return pool;
		pool->users--;
		if (!pool->users) {
			LIST_DEL(&pool->list);
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_DESTROY(&pool->lock);
#endif
			free(pool);
		}
	}
	return NULL;
}
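
/* Typical call pattern (illustrative, using the hypothetical pool from the
 * earlier sketch): assigning the return value back clears the caller's
 * pointer as soon as the pool could actually be released:
 *
 *     pool_head_task = pool_destroy(pool_head_task);
 */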

/* This function dumps memory usage information into the trash buffer. */
void dump_pools_to_trash()
{
	struct pool_head *entry;
	unsigned long allocated, used;
	int nbpools;

	allocated = used = nbpools = 0;
	chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
	list_for_each_entry(entry, &pools, list) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
#endif
		chunk_appendf(&trash, " - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users%s\n",
			      entry->name, entry->size, entry->allocated,
			      entry->size * entry->allocated, entry->used, entry->failed,
			      entry->users, (entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");

		allocated += entry->allocated * entry->size;
		used += entry->used * entry->size;
		nbpools++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
#endif
	}
	chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
		      nbpools, allocated, used);
}
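
/* Example of the resulting output (values illustrative only):
 *
 *   Dumping pools usage. Use SIGQUIT to flush them.
 *    - Pool task (160 bytes) : 70 allocated (11200 bytes), 65 used, 0 failures, 1 users [SHARED]
 *   Total: 1 pools, 11200 bytes allocated, 10400 used.
 */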

/* Dump statistics on pools usage. */
void dump_pools(void)
{
	dump_pools_to_trash();
	qfprintf(stderr, "%s", trash.str);
}

/* This function returns the total number of failed pool allocations */
int pool_total_failures()
{
	struct pool_head *entry;
	int failed = 0;

	list_for_each_entry(entry, &pools, list)
		failed += entry->failed;
	return failed;
}

/* This function returns the total amount of memory allocated in pools (in bytes) */
unsigned long pool_total_allocated()
{
	struct pool_head *entry;
	unsigned long allocated = 0;

	list_for_each_entry(entry, &pools, list)
		allocated += entry->allocated * entry->size;
	return allocated;
}

/* This function returns the total amount of memory used in pools (in bytes) */
unsigned long pool_total_used()
{
	struct pool_head *entry;
	unsigned long used = 0;

	list_for_each_entry(entry, &pools, list)
		used += entry->used * entry->size;
	return used;
}

/* This function dumps memory usage information onto the stream interface's
 * read buffer. It returns 0 as long as it does not complete, non-zero upon
 * completion. No state is used.
 */
static int cli_io_handler_dump_pools(struct appctx *appctx)
{
	struct stream_interface *si = appctx->owner;

	dump_pools_to_trash();
	if (ci_putchk(si_ic(si), &trash) == -1) {
		si_applet_cant_put(si);
		return 0;
	}
	return 1;
}

/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{
	{ { "show", "pools", NULL }, "show pools : report information about the memory pools usage", NULL, cli_io_handler_dump_pools },
	{{},}
}};
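
/* The keyword above exposes the dump on the runtime CLI, e.g. (assuming a
 * stats socket configured at this hypothetical path):
 *
 *     echo "show pools" | socat stdio /var/run/haproxy.sock
 */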

__attribute__((constructor))
static void __memory_init(void)
{
	cli_register_kw(&cli_kws);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */