blob: 63decfe7d17d82cb412c993469543e444891b44a [file] [log] [blame]
Willy Tarreau609aad92018-11-22 08:31:09 +01001/*
2 * activity measurement functions.
3 *
4 * Copyright 2000-2018 Willy Tarreau <w@1wt.eu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
Willy Tarreaub2551052020-06-09 09:07:15 +020013#include <haproxy/activity-t.h>
Willy Tarreau4c7e4b72020-05-27 12:58:42 +020014#include <haproxy/api.h>
Willy Tarreaue8d006a2022-05-05 14:19:00 +020015#include <haproxy/applet.h>
Willy Tarreau6be78492020-06-05 00:00:29 +020016#include <haproxy/cfgparse.h>
Willy Tarreau55542642021-10-08 09:33:24 +020017#include <haproxy/clock.h>
Willy Tarreauf1d32c42020-06-04 21:07:02 +020018#include <haproxy/channel.h>
Willy Tarreau83487a82020-06-04 20:19:54 +020019#include <haproxy/cli.h>
Willy Tarreaub2551052020-06-09 09:07:15 +020020#include <haproxy/freq_ctr.h>
Willy Tarreauf9607f82022-11-25 15:32:38 +010021#include <haproxy/listener.h>
Willy Tarreau5edca2f2022-05-27 09:25:10 +020022#include <haproxy/sc_strm.h>
Willy Tarreaucb086c62022-05-27 09:47:12 +020023#include <haproxy/stconn.h>
Willy Tarreau48fbcae2020-06-03 18:09:46 +020024#include <haproxy/tools.h>
Willy Tarreau75c62c22018-11-22 11:02:09 +010025
/* CLI context for the "show profiling" command */
struct show_prof_ctx {
	int dump_step; /* 0,1,2,4,5,6; see cli_io_handler_show_profiling() */
	int linenum;   /* next line to be dumped (starts at 0) */
	int maxcnt;    /* max line count per step (0=not set) */
	int by_what;   /* 0=sort by usage, 1=sort by address, 2=sort by time */
	int aggr;      /* 0=dump raw, 1=aggregate on callee */
};
34
/* CLI context for the "show activity" command */
struct show_activity_ctx {
	int thr;   /* thread ID to show or -1 for all */
	int line;  /* line number being dumped (restart point across calls) */
};
40
Willy Tarreauf93c7be2021-05-05 17:07:09 +020041#if defined(DEBUG_MEM_STATS)
42/* these ones are macros in bug.h when DEBUG_MEM_STATS is set, and will
43 * prevent the new ones from being redefined.
44 */
45#undef calloc
46#undef malloc
47#undef realloc
48#endif
Willy Tarreau75c62c22018-11-22 11:02:09 +010049
50/* bit field of profiling options. Beware, may be modified at runtime! */
Willy Tarreauef7380f2021-05-05 16:28:31 +020051unsigned int profiling __read_mostly = HA_PROF_TASKS_AOFF;
Willy Tarreau609aad92018-11-22 08:31:09 +010052
53/* One struct per thread containing all collected measurements */
54struct activity activity[MAX_THREADS] __attribute__((aligned(64))) = { };
55
Willy Tarreaua3423872022-09-07 18:49:55 +020056/* One struct per function pointer hash entry (SCHED_ACT_HASH_BUCKETS values, 0=collision) */
57struct sched_activity sched_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64))) = { };
Willy Tarreau609aad92018-11-22 08:31:09 +010058
Willy Tarreaudb87fc72021-05-05 16:50:40 +020059
Willy Tarreaue15615c2021-08-28 12:04:25 +020060#ifdef USE_MEMORY_PROFILING
Willy Tarreau616491b2021-05-11 09:26:23 +020061
62static const char *const memprof_methods[MEMPROF_METH_METHODS] = {
Willy Tarreaufacfad22022-08-17 09:12:53 +020063 "unknown", "malloc", "calloc", "realloc", "free", "p_alloc", "p_free",
Willy Tarreau616491b2021-05-11 09:26:23 +020064};
65
Willy Tarreaudb87fc72021-05-05 16:50:40 +020066/* last one is for hash collisions ("others") and has no caller address */
67struct memprof_stats memprof_stats[MEMPROF_HASH_BUCKETS + 1] = { };
68
Willy Tarreauf93c7be2021-05-05 17:07:09 +020069/* used to detect recursive calls */
70static THREAD_LOCAL int in_memprof = 0;
71
Willy Tarreauf93c7be2021-05-05 17:07:09 +020072/* These ones are used by glibc and will be called early. They are in charge of
73 * initializing the handlers with the original functions.
74 */
75static void *memprof_malloc_initial_handler(size_t size);
76static void *memprof_calloc_initial_handler(size_t nmemb, size_t size);
77static void *memprof_realloc_initial_handler(void *ptr, size_t size);
78static void memprof_free_initial_handler(void *ptr);
79
80/* Fallback handlers for the main alloc/free functions. They are preset to
81 * the initializer in order to save a test in the functions's critical path.
82 */
83static void *(*memprof_malloc_handler)(size_t size) = memprof_malloc_initial_handler;
84static void *(*memprof_calloc_handler)(size_t nmemb, size_t size) = memprof_calloc_initial_handler;
85static void *(*memprof_realloc_handler)(void *ptr, size_t size) = memprof_realloc_initial_handler;
86static void (*memprof_free_handler)(void *ptr) = memprof_free_initial_handler;
87
/* Used to force to die if it's not possible to retrieve the allocation
 * functions. We cannot even use stdio in this case, so the message is
 * written raw to fd 2 before exiting.
 */
static __attribute__((noreturn)) void memprof_die(const char *msg)
{
	size_t len = strlen(msg);

	DISGUISE(write(2, msg, len));
	exit(1);
}
96
97/* Resolve original allocation functions and initialize all handlers.
98 * This must be called very early at boot, before the very first malloc()
99 * call, and is not thread-safe! It's not even possible to use stdio there.
100 * Worse, we have to account for the risk of reentrance from dlsym() when
 * it tries to prepare its error messages. Here it's handled by in_memprof
102 * that makes allocators return NULL. dlsym() handles it gracefully. An
Ilya Shipitsin3df59892021-05-10 12:50:00 +0500103 * alternate approach consists in calling aligned_alloc() from these places
Willy Tarreauf93c7be2021-05-05 17:07:09 +0200104 * but that would mean not being able to intercept it later if considered
105 * useful to do so.
106 */
107static void memprof_init()
108{
109 in_memprof++;
110 memprof_malloc_handler = get_sym_next_addr("malloc");
111 if (!memprof_malloc_handler)
112 memprof_die("FATAL: malloc() function not found.\n");
113
114 memprof_calloc_handler = get_sym_next_addr("calloc");
115 if (!memprof_calloc_handler)
116 memprof_die("FATAL: calloc() function not found.\n");
117
118 memprof_realloc_handler = get_sym_next_addr("realloc");
119 if (!memprof_realloc_handler)
120 memprof_die("FATAL: realloc() function not found.\n");
121
122 memprof_free_handler = get_sym_next_addr("free");
123 if (!memprof_free_handler)
124 memprof_die("FATAL: free() function not found.\n");
125 in_memprof--;
126}
127
128/* the initial handlers will initialize all regular handlers and will call the
129 * one they correspond to. A single one of these functions will typically be
130 * called, though it's unknown which one (as any might be called before main).
131 */
132static void *memprof_malloc_initial_handler(size_t size)
133{
134 if (in_memprof) {
135 /* it's likely that dlsym() needs malloc(), let's fail */
136 return NULL;
137 }
138
139 memprof_init();
140 return memprof_malloc_handler(size);
141}
142
143static void *memprof_calloc_initial_handler(size_t nmemb, size_t size)
144{
145 if (in_memprof) {
146 /* it's likely that dlsym() needs calloc(), let's fail */
147 return NULL;
148 }
149 memprof_init();
150 return memprof_calloc_handler(nmemb, size);
151}
152
153static void *memprof_realloc_initial_handler(void *ptr, size_t size)
154{
155 if (in_memprof) {
156 /* it's likely that dlsym() needs realloc(), let's fail */
157 return NULL;
158 }
159
160 memprof_init();
161 return memprof_realloc_handler(ptr, size);
162}
163
static void memprof_free_initial_handler(void *ptr)
{
	/* no reentrance guard here, unlike the alloc handlers — presumably
	 * dlsym() does not free() during resolution; verify if this ever
	 * needs the in_memprof protection.
	 */
	memprof_init();
	memprof_free_handler(ptr);
}
169
/* Assign a bin for the memprof_stats to the return address. May perform a few
 * attempts before finding the right one, but always succeeds (in the worst
 * case, returns a default bin). The caller address is atomically set except
 * for the default one which is never set.
 */
struct memprof_stats *memprof_get_bin(const void *ra, enum memprof_method meth)
{
	int retries = 16; // up to 16 consecutive entries may be tested.
	const void *old;
	unsigned int bin;

	/* start at the hash bucket for <ra>, then linear-probe forward,
	 * wrapping around the table.
	 */
	bin = ptr_hash(ra, MEMPROF_HASH_BITS);
	for (; memprof_stats[bin].caller != ra; bin = (bin + 1) & (MEMPROF_HASH_BUCKETS - 1)) {
		if (!--retries) {
			/* too many collisions: fall back to the shared
			 * "others" bin past the end of the hash range.
			 */
			bin = MEMPROF_HASH_BUCKETS;
			break;
		}

		old = NULL;
		/* empty slot: try to claim it; the CAS on <caller> arbitrates
		 * concurrent claimers, and only the winner sets the method.
		 */
		if (!memprof_stats[bin].caller &&
		    HA_ATOMIC_CAS(&memprof_stats[bin].caller, &old, ra)) {
			memprof_stats[bin].method = meth;
			break;
		}
	}
	return &memprof_stats[bin];
}
197
198/* This is the new global malloc() function. It must optimize for the normal
199 * case (i.e. profiling disabled) hence the first test to permit a direct jump.
200 * It must remain simple to guarantee the lack of reentrance. stdio is not
201 * possible there even for debugging. The reported size is the really allocated
202 * one as returned by malloc_usable_size(), because this will allow it to be
203 * compared to the one before realloc() or free(). This is a GNU and jemalloc
204 * extension but other systems may also store this size in ptr[-1].
205 */
206void *malloc(size_t size)
207{
208 struct memprof_stats *bin;
209 void *ret;
210
211 if (likely(!(profiling & HA_PROF_MEMORY)))
212 return memprof_malloc_handler(size);
213
214 ret = memprof_malloc_handler(size);
Willy Tarreau1de51eb2021-10-22 16:33:53 +0200215 size = malloc_usable_size(ret) + sizeof(void *);
Willy Tarreauf93c7be2021-05-05 17:07:09 +0200216
Willy Tarreau616491b2021-05-11 09:26:23 +0200217 bin = memprof_get_bin(__builtin_return_address(0), MEMPROF_METH_MALLOC);
Willy Tarreauf93c7be2021-05-05 17:07:09 +0200218 _HA_ATOMIC_ADD(&bin->alloc_calls, 1);
219 _HA_ATOMIC_ADD(&bin->alloc_tot, size);
220 return ret;
221}
222
223/* This is the new global calloc() function. It must optimize for the normal
224 * case (i.e. profiling disabled) hence the first test to permit a direct jump.
225 * It must remain simple to guarantee the lack of reentrance. stdio is not
226 * possible there even for debugging. The reported size is the really allocated
227 * one as returned by malloc_usable_size(), because this will allow it to be
228 * compared to the one before realloc() or free(). This is a GNU and jemalloc
229 * extension but other systems may also store this size in ptr[-1].
230 */
231void *calloc(size_t nmemb, size_t size)
232{
233 struct memprof_stats *bin;
234 void *ret;
235
236 if (likely(!(profiling & HA_PROF_MEMORY)))
237 return memprof_calloc_handler(nmemb, size);
238
239 ret = memprof_calloc_handler(nmemb, size);
Willy Tarreau1de51eb2021-10-22 16:33:53 +0200240 size = malloc_usable_size(ret) + sizeof(void *);
Willy Tarreauf93c7be2021-05-05 17:07:09 +0200241
Willy Tarreau616491b2021-05-11 09:26:23 +0200242 bin = memprof_get_bin(__builtin_return_address(0), MEMPROF_METH_CALLOC);
Willy Tarreauf93c7be2021-05-05 17:07:09 +0200243 _HA_ATOMIC_ADD(&bin->alloc_calls, 1);
244 _HA_ATOMIC_ADD(&bin->alloc_tot, size);
245 return ret;
246}
247
/* This is the new global realloc() function. It must optimize for the normal
 * case (i.e. profiling disabled) hence the first test to permit a direct jump.
 * It must remain simple to guarantee the lack of reentrance. stdio is not
 * possible there even for debugging. The reported size is the really allocated
 * one as returned by malloc_usable_size(), because this will allow it to be
 * compared to the one before realloc() or free(). This is a GNU and jemalloc
 * extension but other systems may also store this size in ptr[-1].
 * Depending on the old vs new size, it's considered as an allocation or a free
 * (or neither if the size remains the same).
 */
void *realloc(void *ptr, size_t size)
{
	struct memprof_stats *bin;
	size_t size_before;
	void *ret;

	if (likely(!(profiling & HA_PROF_MEMORY)))
		return memprof_realloc_handler(ptr, size);

	/* measure the usable size on both sides of the call so that only the
	 * delta is accounted for below.
	 */
	size_before = malloc_usable_size(ptr);
	ret = memprof_realloc_handler(ptr, size);
	size = malloc_usable_size(ret);

	/* only count the extra link for new allocations */
	if (!ptr)
		size += sizeof(void *);

	bin = memprof_get_bin(__builtin_return_address(0), MEMPROF_METH_REALLOC);
	if (size > size_before) {
		/* grew: account the growth as an allocation */
		_HA_ATOMIC_ADD(&bin->alloc_calls, 1);
		_HA_ATOMIC_ADD(&bin->alloc_tot, size - size_before);
	} else if (size < size_before) {
		/* shrank: account the shrinkage as a free */
		_HA_ATOMIC_ADD(&bin->free_calls, 1);
		_HA_ATOMIC_ADD(&bin->free_tot, size_before - size);
	}
	return ret;
}
285
286/* This is the new global free() function. It must optimize for the normal
287 * case (i.e. profiling disabled) hence the first test to permit a direct jump.
288 * It must remain simple to guarantee the lack of reentrance. stdio is not
289 * possible there even for debugging. The reported size is the really allocated
290 * one as returned by malloc_usable_size(), because this will allow it to be
291 * compared to the one before realloc() or free(). This is a GNU and jemalloc
292 * extension but other systems may also store this size in ptr[-1]. Since
293 * free() is often called on NULL pointers to collect garbage at the end of
294 * many functions or during config parsing, as a special case free(NULL)
295 * doesn't update any stats.
296 */
297void free(void *ptr)
298{
299 struct memprof_stats *bin;
300 size_t size_before;
301
302 if (likely(!(profiling & HA_PROF_MEMORY) || !ptr)) {
303 memprof_free_handler(ptr);
304 return;
305 }
306
Willy Tarreau1de51eb2021-10-22 16:33:53 +0200307 size_before = malloc_usable_size(ptr) + sizeof(void *);
Willy Tarreauf93c7be2021-05-05 17:07:09 +0200308 memprof_free_handler(ptr);
309
Willy Tarreau616491b2021-05-11 09:26:23 +0200310 bin = memprof_get_bin(__builtin_return_address(0), MEMPROF_METH_FREE);
Willy Tarreauf93c7be2021-05-05 17:07:09 +0200311 _HA_ATOMIC_ADD(&bin->free_calls, 1);
312 _HA_ATOMIC_ADD(&bin->free_tot, size_before);
313}
314
Willy Tarreaudb87fc72021-05-05 16:50:40 +0200315#endif // USE_MEMORY_PROFILING
316
Willy Tarreau609aad92018-11-22 08:31:09 +0100317/* Updates the current thread's statistics about stolen CPU time. The unit for
318 * <stolen> is half-milliseconds.
319 */
320void report_stolen_time(uint64_t stolen)
321{
322 activity[tid].cpust_total += stolen;
323 update_freq_ctr(&activity[tid].cpust_1s, stolen);
324 update_freq_ctr_period(&activity[tid].cpust_15s, 15000, stolen);
325}
Willy Tarreau75c62c22018-11-22 11:02:09 +0100326
/* Update avg_loop value for the current thread and possibly decide to enable
 * task-level profiling on the current thread based on its average run time.
 * The <run_time> argument is the number of microseconds elapsed since the
 * last time poll() returned.
 */
void activity_count_runtime(uint32_t run_time)
{
	uint32_t up, down;

	/* 1 millisecond per loop on average over last 1024 iterations is
	 * enough to turn on profiling.
	 */
	up = 1000;
	down = up * 99 / 100;

	/* fold the sample into the sliding window average */
	run_time = swrate_add(&activity[tid].avg_loop_us, TIME_STATS_SAMPLES, run_time);

	/* In automatic mode, reaching the "up" threshold on average switches
	 * profiling to "on" when automatic, and going back below the "down"
	 * threshold switches to off. The forced modes don't check the load.
	 * The 1% gap between <up> and <down> provides hysteresis so that the
	 * flag doesn't flap around the threshold.
	 */
	if (!(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)) {
		if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_ON ||
		             ((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AON &&
		             swrate_avg(run_time, TIME_STATS_SAMPLES) >= up)))
			_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_TASK_PROFILING);
	} else {
		if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_OFF ||
		             ((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AOFF &&
		             swrate_avg(run_time, TIME_STATS_SAMPLES) <= down)))
			_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_TASK_PROFILING);
	}
}
360
Willy Tarreauca3afc22021-05-05 18:33:19 +0200361#ifdef USE_MEMORY_PROFILING
362/* config parser for global "profiling.memory", accepts "on" or "off" */
363static int cfg_parse_prof_memory(char **args, int section_type, struct proxy *curpx,
364 const struct proxy *defpx, const char *file, int line,
365 char **err)
366{
367 if (too_many_args(1, args, err, NULL))
368 return -1;
369
370 if (strcmp(args[1], "on") == 0)
371 profiling |= HA_PROF_MEMORY;
372 else if (strcmp(args[1], "off") == 0)
373 profiling &= ~HA_PROF_MEMORY;
374 else {
375 memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
376 return -1;
377 }
378 return 0;
379}
380#endif // USE_MEMORY_PROFILING
381
Willy Tarreau75c62c22018-11-22 11:02:09 +0100382/* config parser for global "profiling.tasks", accepts "on" or "off" */
383static int cfg_parse_prof_tasks(char **args, int section_type, struct proxy *curpx,
Willy Tarreau01825162021-03-09 09:53:46 +0100384 const struct proxy *defpx, const char *file, int line,
Willy Tarreau75c62c22018-11-22 11:02:09 +0100385 char **err)
386{
387 if (too_many_args(1, args, err, NULL))
388 return -1;
389
390 if (strcmp(args[1], "on") == 0)
Willy Tarreaud2d33482019-04-25 17:09:07 +0200391 profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_ON;
392 else if (strcmp(args[1], "auto") == 0)
Willy Tarreauaa622b82021-01-28 21:44:22 +0100393 profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AOFF;
Willy Tarreau75c62c22018-11-22 11:02:09 +0100394 else if (strcmp(args[1], "off") == 0)
Willy Tarreaud2d33482019-04-25 17:09:07 +0200395 profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_OFF;
Willy Tarreau75c62c22018-11-22 11:02:09 +0100396 else {
Willy Tarreaud2d33482019-04-25 17:09:07 +0200397 memprintf(err, "'%s' expects either 'on', 'auto', or 'off' but got '%s'.", args[0], args[1]);
Willy Tarreau75c62c22018-11-22 11:02:09 +0100398 return -1;
399 }
400 return 0;
401}
402
/* parse a "set profiling" command. It always returns 1. */
static int cli_parse_set_profiling(char **args, char *payload, struct appctx *appctx, void *private)
{
	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	/* args[2] selects the subsystem: "memory" or "tasks" */
	if (strcmp(args[2], "memory") == 0) {
#ifdef USE_MEMORY_PROFILING
		if (strcmp(args[3], "on") == 0) {
			unsigned int old = profiling;
			int i;

			/* CAS loop: other bits of <profiling> may change
			 * concurrently, only OR in the memory flag.
			 */
			while (!_HA_ATOMIC_CAS(&profiling, &old, old | HA_PROF_MEMORY))
				;

			/* also flush current profiling stats */
			for (i = 0; i < sizeof(memprof_stats) / sizeof(memprof_stats[0]); i++) {
				HA_ATOMIC_STORE(&memprof_stats[i].alloc_calls, 0);
				HA_ATOMIC_STORE(&memprof_stats[i].free_calls, 0);
				HA_ATOMIC_STORE(&memprof_stats[i].alloc_tot, 0);
				HA_ATOMIC_STORE(&memprof_stats[i].free_tot, 0);
				HA_ATOMIC_STORE(&memprof_stats[i].caller, NULL);
			}
		}
		else if (strcmp(args[3], "off") == 0) {
			unsigned int old = profiling;

			while (!_HA_ATOMIC_CAS(&profiling, &old, old & ~HA_PROF_MEMORY))
				;
		}
		else
			return cli_err(appctx, "Expects either 'on' or 'off'.\n");
		return 1;
#else
		return cli_err(appctx, "Memory profiling not compiled in.\n");
#endif
	}

	if (strcmp(args[2], "tasks") != 0)
		return cli_err(appctx, "Expects either 'tasks' or 'memory'.\n");

	if (strcmp(args[3], "on") == 0) {
		unsigned int old = profiling;
		int i;

		while (!_HA_ATOMIC_CAS(&profiling, &old, (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_ON))
			;
		/* also flush current profiling stats */
		for (i = 0; i < SCHED_ACT_HASH_BUCKETS; i++) {
			HA_ATOMIC_STORE(&sched_activity[i].calls, 0);
			HA_ATOMIC_STORE(&sched_activity[i].cpu_time, 0);
			HA_ATOMIC_STORE(&sched_activity[i].lat_time, 0);
			HA_ATOMIC_STORE(&sched_activity[i].func, NULL);
			HA_ATOMIC_STORE(&sched_activity[i].caller, NULL);
		}
	}
	else if (strcmp(args[3], "auto") == 0) {
		unsigned int old = profiling;
		unsigned int new;

		/* preserve the current auto on/off state when switching to
		 * automatic mode.
		 */
		do {
			if ((old & HA_PROF_TASKS_MASK) >= HA_PROF_TASKS_AON)
				new = (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AON;
			else
				new = (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AOFF;
		} while (!_HA_ATOMIC_CAS(&profiling, &old, new));
	}
	else if (strcmp(args[3], "off") == 0) {
		unsigned int old = profiling;
		while (!_HA_ATOMIC_CAS(&profiling, &old, (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_OFF))
			;
	}
	else
		return cli_err(appctx, "Expects 'on', 'auto', or 'off'.\n");

	return 1;
}
480
Willy Tarreauf1c8a382021-05-13 10:00:17 +0200481static int cmp_sched_activity_calls(const void *a, const void *b)
Willy Tarreau1bd67e92021-01-29 00:07:40 +0100482{
483 const struct sched_activity *l = (const struct sched_activity *)a;
484 const struct sched_activity *r = (const struct sched_activity *)b;
485
486 if (l->calls > r->calls)
487 return -1;
488 else if (l->calls < r->calls)
489 return 1;
490 else
491 return 0;
492}
493
Willy Tarreau3d4cdb12022-09-07 18:37:47 +0200494/* sort by address first, then by call count */
Willy Tarreauf1c8a382021-05-13 10:00:17 +0200495static int cmp_sched_activity_addr(const void *a, const void *b)
496{
497 const struct sched_activity *l = (const struct sched_activity *)a;
498 const struct sched_activity *r = (const struct sched_activity *)b;
499
500 if (l->func > r->func)
501 return -1;
502 else if (l->func < r->func)
503 return 1;
Willy Tarreau3d4cdb12022-09-07 18:37:47 +0200504 else if (l->calls > r->calls)
505 return -1;
506 else if (l->calls < r->calls)
507 return 1;
Willy Tarreauf1c8a382021-05-13 10:00:17 +0200508 else
509 return 0;
510}
511
Willy Tarreaue86bc352022-09-08 16:38:10 +0200512/* sort by cpu time first, then by inverse call count (to spot highest offenders) */
513static int cmp_sched_activity_cpu(const void *a, const void *b)
514{
515 const struct sched_activity *l = (const struct sched_activity *)a;
516 const struct sched_activity *r = (const struct sched_activity *)b;
517
518 if (l->cpu_time > r->cpu_time)
519 return -1;
520 else if (l->cpu_time < r->cpu_time)
521 return 1;
522 else if (l->calls < r->calls)
523 return -1;
524 else if (l->calls > r->calls)
525 return 1;
526 else
527 return 0;
528}
529
Willy Tarreaue15615c2021-08-28 12:04:25 +0200530#ifdef USE_MEMORY_PROFILING
Willy Tarreau993d44d2021-05-05 18:07:02 +0200531/* used by qsort below */
532static int cmp_memprof_stats(const void *a, const void *b)
533{
534 const struct memprof_stats *l = (const struct memprof_stats *)a;
535 const struct memprof_stats *r = (const struct memprof_stats *)b;
536
537 if (l->alloc_tot + l->free_tot > r->alloc_tot + r->free_tot)
538 return -1;
539 else if (l->alloc_tot + l->free_tot < r->alloc_tot + r->free_tot)
540 return 1;
541 else
542 return 0;
543}
Willy Tarreauf1c8a382021-05-13 10:00:17 +0200544
545static int cmp_memprof_addr(const void *a, const void *b)
546{
547 const struct memprof_stats *l = (const struct memprof_stats *)a;
548 const struct memprof_stats *r = (const struct memprof_stats *)b;
549
550 if (l->caller > r->caller)
551 return -1;
552 else if (l->caller < r->caller)
553 return 1;
554 else
555 return 0;
556}
Willy Tarreau993d44d2021-05-05 18:07:02 +0200557#endif // USE_MEMORY_PROFILING
558
Willy Tarreau3d4cdb12022-09-07 18:37:47 +0200559/* Computes the index of function pointer <func> and caller <caller> for use
560 * with sched_activity[] or any other similar array passed in <array>, and
561 * returns a pointer to the entry after having atomically assigned it to this
562 * function pointer and caller combination. Note that in case of collision,
563 * the first entry is returned instead ("other").
Willy Tarreaua26be372021-10-06 16:26:33 +0200564 */
Willy Tarreau3d4cdb12022-09-07 18:37:47 +0200565struct sched_activity *sched_activity_entry(struct sched_activity *array, const void *func, const void *caller)
Willy Tarreaua26be372021-10-06 16:26:33 +0200566{
Willy Tarreau3d4cdb12022-09-07 18:37:47 +0200567 uint32_t hash = ptr2_hash(func, caller, SCHED_ACT_HASH_BITS);
Willy Tarreaua26be372021-10-06 16:26:33 +0200568 struct sched_activity *ret;
Willy Tarreau64435aa2022-09-07 18:54:30 +0200569 const void *old;
570 int tries = 16;
Willy Tarreaua26be372021-10-06 16:26:33 +0200571
Willy Tarreau64435aa2022-09-07 18:54:30 +0200572 for (tries = 16; tries > 0; tries--, hash++) {
573 ret = &array[hash];
Willy Tarreaua26be372021-10-06 16:26:33 +0200574
Willy Tarreau64435aa2022-09-07 18:54:30 +0200575 while (1) {
576 if (likely(ret->func)) {
577 if (likely(ret->func == func && ret->caller == caller))
578 return ret;
579 break;
580 }
Willy Tarreaua26be372021-10-06 16:26:33 +0200581
Willy Tarreau64435aa2022-09-07 18:54:30 +0200582 /* try to create the new entry. Func is sufficient to
583 * reserve the node.
584 */
585 old = NULL;
586 if (HA_ATOMIC_CAS(&ret->func, &old, func)) {
587 ret->caller = caller;
588 return ret;
589 }
590 /* changed in parallel, check again */
591 }
592 }
Willy Tarreaua26be372021-10-06 16:26:33 +0200593
594 return array;
595}
596
Willy Tarreau75c62c22018-11-22 11:02:09 +0100597/* This function dumps all profiling settings. It returns 0 if the output
598 * buffer is full and it needs to be called again, otherwise non-zero.
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200599 * It dumps some parts depending on the following states from show_prof_ctx:
600 * dump_step:
Willy Tarreau637d85a2021-05-05 17:33:27 +0200601 * 0, 4: dump status, then jump to 1 if 0
602 * 1, 5: dump tasks, then jump to 2 if 1
603 * 2, 6: dump memory, then stop
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200604 * linenum:
Willy Tarreau637d85a2021-05-05 17:33:27 +0200605 * restart line for each step (starts at zero)
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200606 * maxcnt:
Willy Tarreau637d85a2021-05-05 17:33:27 +0200607 * may contain a configured max line count for each step (0=not set)
 * by_what:
 *   0: sort by usage
 *   1: sort by address
 *   2: sort by CPU time
Willy Tarreau75c62c22018-11-22 11:02:09 +0100611 */
612static int cli_io_handler_show_profiling(struct appctx *appctx)
613{
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200614 struct show_prof_ctx *ctx = appctx->svcctx;
Willy Tarreaua3423872022-09-07 18:49:55 +0200615 struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64)));
Willy Tarreaue15615c2021-08-28 12:04:25 +0200616#ifdef USE_MEMORY_PROFILING
Willy Tarreau993d44d2021-05-05 18:07:02 +0200617 struct memprof_stats tmp_memstats[MEMPROF_HASH_BUCKETS + 1];
Willy Tarreauf5fb8582021-05-11 14:06:24 +0200618 unsigned long long tot_alloc_calls, tot_free_calls;
619 unsigned long long tot_alloc_bytes, tot_free_bytes;
Willy Tarreau993d44d2021-05-05 18:07:02 +0200620#endif
Willy Tarreauc12b3212022-05-27 11:08:15 +0200621 struct stconn *sc = appctx_sc(appctx);
Willy Tarreau1bd67e92021-01-29 00:07:40 +0100622 struct buffer *name_buffer = get_trash_chunk();
Willy Tarreau3d4cdb12022-09-07 18:37:47 +0200623 const struct ha_caller *caller;
Willy Tarreaud2d33482019-04-25 17:09:07 +0200624 const char *str;
Willy Tarreau637d85a2021-05-05 17:33:27 +0200625 int max_lines;
Willy Tarreaudc89b182022-09-08 16:05:57 +0200626 int i, j, max;
Willy Tarreau75c62c22018-11-22 11:02:09 +0100627
Christopher Faulet87633c32023-04-03 18:32:50 +0200628 /* FIXME: Don't watch the other side ! */
Christopher Faulet208c7122023-04-13 16:16:15 +0200629 if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
Willy Tarreau75c62c22018-11-22 11:02:09 +0100630 return 1;
631
632 chunk_reset(&trash);
633
Willy Tarreaud2d33482019-04-25 17:09:07 +0200634 switch (profiling & HA_PROF_TASKS_MASK) {
Willy Tarreauaa622b82021-01-28 21:44:22 +0100635 case HA_PROF_TASKS_AOFF: str="auto-off"; break;
636 case HA_PROF_TASKS_AON: str="auto-on"; break;
Willy Tarreaud2d33482019-04-25 17:09:07 +0200637 case HA_PROF_TASKS_ON: str="on"; break;
638 default: str="off"; break;
639 }
640
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200641 if ((ctx->dump_step & 3) != 0)
Willy Tarreau637d85a2021-05-05 17:33:27 +0200642 goto skip_status;
Willy Tarreau1bd67e92021-01-29 00:07:40 +0100643
Willy Tarreaud2d33482019-04-25 17:09:07 +0200644 chunk_printf(&trash,
Willy Tarreau00dd44f2021-05-05 16:44:23 +0200645 "Per-task CPU profiling : %-8s # set profiling tasks {on|auto|off}\n"
646 "Memory usage profiling : %-8s # set profiling memory {on|off}\n",
647 str, (profiling & HA_PROF_MEMORY) ? "on" : "off");
Willy Tarreau75c62c22018-11-22 11:02:09 +0100648
Willy Tarreaud0a06d52022-05-18 15:07:19 +0200649 if (applet_putchk(appctx, &trash) == -1) {
Willy Tarreau637d85a2021-05-05 17:33:27 +0200650 /* failed, try again */
Willy Tarreau637d85a2021-05-05 17:33:27 +0200651 return 0;
652 }
653
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200654 ctx->linenum = 0; // reset first line to dump
655 if ((ctx->dump_step & 4) == 0)
656 ctx->dump_step++; // next step
Willy Tarreau637d85a2021-05-05 17:33:27 +0200657
658 skip_status:
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200659 if ((ctx->dump_step & 3) != 1)
Willy Tarreau637d85a2021-05-05 17:33:27 +0200660 goto skip_tasks;
Willy Tarreau1bd67e92021-01-29 00:07:40 +0100661
Willy Tarreau637d85a2021-05-05 17:33:27 +0200662 memcpy(tmp_activity, sched_activity, sizeof(tmp_activity));
Willy Tarreaudc89b182022-09-08 16:05:57 +0200663 /* for addr sort and for callee aggregation we have to first sort by address */
Willy Tarreaue86bc352022-09-08 16:38:10 +0200664 if (ctx->aggr || ctx->by_what == 1) // sort by addr
Willy Tarreaudc89b182022-09-08 16:05:57 +0200665 qsort(tmp_activity, SCHED_ACT_HASH_BUCKETS, sizeof(tmp_activity[0]), cmp_sched_activity_addr);
666
667 if (ctx->aggr) {
668 /* merge entries for the same callee and reset their count */
669 for (i = j = 0; i < SCHED_ACT_HASH_BUCKETS; i = j) {
670 for (j = i + 1; j < SCHED_ACT_HASH_BUCKETS && tmp_activity[j].func == tmp_activity[i].func; j++) {
671 tmp_activity[i].calls += tmp_activity[j].calls;
672 tmp_activity[i].cpu_time += tmp_activity[j].cpu_time;
673 tmp_activity[i].lat_time += tmp_activity[j].lat_time;
674 tmp_activity[j].calls = 0;
675 }
676 }
677 }
678
Willy Tarreaue86bc352022-09-08 16:38:10 +0200679 if (!ctx->by_what) // sort by usage
Willy Tarreaua3423872022-09-07 18:49:55 +0200680 qsort(tmp_activity, SCHED_ACT_HASH_BUCKETS, sizeof(tmp_activity[0]), cmp_sched_activity_calls);
Willy Tarreaue86bc352022-09-08 16:38:10 +0200681 else if (ctx->by_what == 2) // by cpu_tot
682 qsort(tmp_activity, SCHED_ACT_HASH_BUCKETS, sizeof(tmp_activity[0]), cmp_sched_activity_cpu);
Willy Tarreau637d85a2021-05-05 17:33:27 +0200683
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200684 if (!ctx->linenum)
Willy Tarreau637d85a2021-05-05 17:33:27 +0200685 chunk_appendf(&trash, "Tasks activity:\n"
686 " function calls cpu_tot cpu_avg lat_tot lat_avg\n");
687
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200688 max_lines = ctx->maxcnt;
Willy Tarreau637d85a2021-05-05 17:33:27 +0200689 if (!max_lines)
Willy Tarreaua3423872022-09-07 18:49:55 +0200690 max_lines = SCHED_ACT_HASH_BUCKETS;
Willy Tarreau637d85a2021-05-05 17:33:27 +0200691
Willy Tarreaudc89b182022-09-08 16:05:57 +0200692 for (i = ctx->linenum; i < max_lines; i++) {
693 if (!tmp_activity[i].calls)
694 continue; // skip aggregated or empty entries
695
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200696 ctx->linenum = i;
Willy Tarreau1bd67e92021-01-29 00:07:40 +0100697 chunk_reset(name_buffer);
Willy Tarreau3d4cdb12022-09-07 18:37:47 +0200698 caller = HA_ATOMIC_LOAD(&tmp_activity[i].caller);
Willy Tarreau1bd67e92021-01-29 00:07:40 +0100699
700 if (!tmp_activity[i].func)
701 chunk_printf(name_buffer, "other");
702 else
703 resolve_sym_name(name_buffer, "", tmp_activity[i].func);
704
705 /* reserve 35 chars for name+' '+#calls, knowing that longer names
706 * are often used for less often called functions.
707 */
708 max = 35 - name_buffer->data;
709 if (max < 1)
710 max = 1;
711 chunk_appendf(&trash, " %s%*llu", name_buffer->area, max, (unsigned long long)tmp_activity[i].calls);
712
713 print_time_short(&trash, " ", tmp_activity[i].cpu_time, "");
714 print_time_short(&trash, " ", tmp_activity[i].cpu_time / tmp_activity[i].calls, "");
715 print_time_short(&trash, " ", tmp_activity[i].lat_time, "");
Willy Tarreau3d4cdb12022-09-07 18:37:47 +0200716 print_time_short(&trash, " ", tmp_activity[i].lat_time / tmp_activity[i].calls, "");
717
Willy Tarreaudc89b182022-09-08 16:05:57 +0200718 if (caller && !ctx->aggr && caller->what <= WAKEUP_TYPE_APPCTX_WAKEUP)
Willy Tarreau3d4cdb12022-09-07 18:37:47 +0200719 chunk_appendf(&trash, " <- %s@%s:%d %s",
720 caller->func, caller->file, caller->line,
721 task_wakeup_type_str(caller->what));
722
723 b_putchr(&trash, '\n');
Willy Tarreau637d85a2021-05-05 17:33:27 +0200724
Willy Tarreaud0a06d52022-05-18 15:07:19 +0200725 if (applet_putchk(appctx, &trash) == -1) {
Willy Tarreau637d85a2021-05-05 17:33:27 +0200726 /* failed, try again */
Willy Tarreau637d85a2021-05-05 17:33:27 +0200727 return 0;
728 }
Willy Tarreau1bd67e92021-01-29 00:07:40 +0100729 }
730
Willy Tarreaud0a06d52022-05-18 15:07:19 +0200731 if (applet_putchk(appctx, &trash) == -1) {
Willy Tarreau75c62c22018-11-22 11:02:09 +0100732 /* failed, try again */
Willy Tarreau75c62c22018-11-22 11:02:09 +0100733 return 0;
734 }
Willy Tarreau637d85a2021-05-05 17:33:27 +0200735
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200736 ctx->linenum = 0; // reset first line to dump
737 if ((ctx->dump_step & 4) == 0)
738 ctx->dump_step++; // next step
Willy Tarreau637d85a2021-05-05 17:33:27 +0200739
740 skip_tasks:
741
Willy Tarreaue15615c2021-08-28 12:04:25 +0200742#ifdef USE_MEMORY_PROFILING
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200743 if ((ctx->dump_step & 3) != 2)
Willy Tarreau993d44d2021-05-05 18:07:02 +0200744 goto skip_mem;
745
746 memcpy(tmp_memstats, memprof_stats, sizeof(tmp_memstats));
Willy Tarreaue86bc352022-09-08 16:38:10 +0200747 if (ctx->by_what)
Willy Tarreauf1c8a382021-05-13 10:00:17 +0200748 qsort(tmp_memstats, MEMPROF_HASH_BUCKETS+1, sizeof(tmp_memstats[0]), cmp_memprof_addr);
749 else
750 qsort(tmp_memstats, MEMPROF_HASH_BUCKETS+1, sizeof(tmp_memstats[0]), cmp_memprof_stats);
Willy Tarreau993d44d2021-05-05 18:07:02 +0200751
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200752 if (!ctx->linenum)
Willy Tarreau993d44d2021-05-05 18:07:02 +0200753 chunk_appendf(&trash,
754 "Alloc/Free statistics by call place:\n"
Willy Tarreau616491b2021-05-11 09:26:23 +0200755 " Calls | Tot Bytes | Caller and method\n"
Willy Tarreau993d44d2021-05-05 18:07:02 +0200756 "<- alloc -> <- free ->|<-- alloc ---> <-- free ---->|\n");
757
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200758 max_lines = ctx->maxcnt;
Willy Tarreau993d44d2021-05-05 18:07:02 +0200759 if (!max_lines)
760 max_lines = MEMPROF_HASH_BUCKETS + 1;
761
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200762 for (i = ctx->linenum; i < max_lines; i++) {
Willy Tarreau993d44d2021-05-05 18:07:02 +0200763 struct memprof_stats *entry = &tmp_memstats[i];
764
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200765 ctx->linenum = i;
Willy Tarreau993d44d2021-05-05 18:07:02 +0200766 if (!entry->alloc_calls && !entry->free_calls)
767 continue;
768 chunk_appendf(&trash, "%11llu %11llu %14llu %14llu| %16p ",
769 entry->alloc_calls, entry->free_calls,
770 entry->alloc_tot, entry->free_tot,
771 entry->caller);
772
773 if (entry->caller)
774 resolve_sym_name(&trash, NULL, entry->caller);
775 else
776 chunk_appendf(&trash, "[other]");
777
Willy Tarreau8cce4d72021-10-22 16:26:12 +0200778 chunk_appendf(&trash," %s(%lld)", memprof_methods[entry->method],
Willy Tarreau616491b2021-05-11 09:26:23 +0200779 (long long)(entry->alloc_tot - entry->free_tot) / (long long)(entry->alloc_calls + entry->free_calls));
Willy Tarreau993d44d2021-05-05 18:07:02 +0200780
Willy Tarreau8cce4d72021-10-22 16:26:12 +0200781 if (entry->alloc_tot && entry->free_tot) {
782 /* that's a realloc, show the total diff to help spot leaks */
783 chunk_appendf(&trash," [delta=%lld]", (long long)(entry->alloc_tot - entry->free_tot));
784 }
785
Willy Tarreau42b180d2022-08-17 09:35:16 +0200786 if (entry->info) {
787 /* that's a pool name */
788 const struct pool_head *pool = entry->info;
789 chunk_appendf(&trash," [pool=%s]", pool->name);
790 }
791
Willy Tarreau8cce4d72021-10-22 16:26:12 +0200792 chunk_appendf(&trash, "\n");
793
Willy Tarreaud0a06d52022-05-18 15:07:19 +0200794 if (applet_putchk(appctx, &trash) == -1)
Willy Tarreau993d44d2021-05-05 18:07:02 +0200795 return 0;
Willy Tarreau993d44d2021-05-05 18:07:02 +0200796 }
797
Willy Tarreaud0a06d52022-05-18 15:07:19 +0200798 if (applet_putchk(appctx, &trash) == -1)
Willy Tarreau993d44d2021-05-05 18:07:02 +0200799 return 0;
Willy Tarreau993d44d2021-05-05 18:07:02 +0200800
Willy Tarreauf5fb8582021-05-11 14:06:24 +0200801 tot_alloc_calls = tot_free_calls = tot_alloc_bytes = tot_free_bytes = 0;
802 for (i = 0; i < max_lines; i++) {
803 tot_alloc_calls += tmp_memstats[i].alloc_calls;
804 tot_free_calls += tmp_memstats[i].free_calls;
805 tot_alloc_bytes += tmp_memstats[i].alloc_tot;
806 tot_free_bytes += tmp_memstats[i].free_tot;
807 }
808
809 chunk_appendf(&trash,
810 "-----------------------|-----------------------------|\n"
811 "%11llu %11llu %14llu %14llu| <- Total; Delta_calls=%lld; Delta_bytes=%lld\n",
812 tot_alloc_calls, tot_free_calls,
813 tot_alloc_bytes, tot_free_bytes,
814 tot_alloc_calls - tot_free_calls,
815 tot_alloc_bytes - tot_free_bytes);
816
Willy Tarreaud0a06d52022-05-18 15:07:19 +0200817 if (applet_putchk(appctx, &trash) == -1)
Willy Tarreauf5fb8582021-05-11 14:06:24 +0200818 return 0;
Willy Tarreauf5fb8582021-05-11 14:06:24 +0200819
Willy Tarreaue8d006a2022-05-05 14:19:00 +0200820 ctx->linenum = 0; // reset first line to dump
821 if ((ctx->dump_step & 4) == 0)
822 ctx->dump_step++; // next step
Willy Tarreau993d44d2021-05-05 18:07:02 +0200823
824 skip_mem:
825#endif // USE_MEMORY_PROFILING
826
Willy Tarreau75c62c22018-11-22 11:02:09 +0100827 return 1;
828}
829
/* parse a "show profiling" command. It returns 1 on failure, 0 if it starts to dump.
 * The options are collected in the applet's svcctx (struct show_prof_ctx):
 * - dump_step is set to the first state (0=all, 4=status, 5=tasks, 6=memory)
 * - by_what selects the sort order (0=by usage, 1=by address, 2=by total time)
 * - aggr is set to 1 when task stats must be aggregated on the callee
 * - maxcnt is set to the max number of output lines (0 = no limit)
 */
static int cli_parse_show_profiling(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct show_prof_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
	int arg;

	/* "show profiling" may reveal function addresses, restrict it to admins */
	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	/* options start at the 3rd word and may appear in any order */
	for (arg = 2; *args[arg]; arg++) {
		if (strcmp(args[arg], "all") == 0) {
			ctx->dump_step = 0; // will cycle through 0,1,2; default
		}
		else if (strcmp(args[arg], "status") == 0) {
			ctx->dump_step = 4; // will visit status only
		}
		else if (strcmp(args[arg], "tasks") == 0) {
			ctx->dump_step = 5; // will visit tasks only
		}
		else if (strcmp(args[arg], "memory") == 0) {
			ctx->dump_step = 6; // will visit memory only
		}
		else if (strcmp(args[arg], "byaddr") == 0) {
			ctx->by_what = 1; // sort output by address instead of usage
		}
		else if (strcmp(args[arg], "bytime") == 0) {
			ctx->by_what = 2; // sort output by total time instead of usage
		}
		else if (strcmp(args[arg], "aggr") == 0) {
			ctx->aggr = 1; // aggregate output by callee
		}
		else if (isdigit((unsigned char)*args[arg])) {
			ctx->maxcnt = atoi(args[arg]); // number of entries to dump
		}
		else
			return cli_err(appctx, "Expects either 'all', 'status', 'tasks', 'memory', 'byaddr', 'bytime', 'aggr' or a max number of output lines.\n");
	}
	return 0;
}
873
/* This function scans all threads' run queues and collects statistics about
 * running tasks. It returns 0 if the output buffer is full and it needs to be
 * called again, otherwise non-zero. Note that the dump is produced in a single
 * pass under thread isolation: there is no yield/resume in the middle of the
 * scan itself.
 */
static int cli_io_handler_show_tasks(struct appctx *appctx)
{
	/* per-function accumulation buckets, cacheline-aligned to limit false
	 * sharing while the array is being filled.
	 */
	struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64)));
	struct stconn *sc = appctx_sc(appctx);
	struct buffer *name_buffer = get_trash_chunk();
	struct sched_activity *entry;
	const struct tasklet *tl;
	const struct task *t;
	uint64_t now_ns, lat;
	struct eb32_node *rqnode;
	uint64_t tot_calls;
	int thr, queue;
	int i, max;

	/* FIXME: Don't watch the other side ! */
	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
		return 1;

	/* It's not possible to scan queues in small chunks and yield in the
	 * middle of the dump and come back again. So what we're doing instead
	 * is to freeze all threads and inspect their queues at once as fast as
	 * possible, using a sched_activity array to collect metrics with
	 * limited collision, then we'll report statistics only. The tasks'
	 * #calls will reflect the number of occurrences, and the lat_time will
	 * reflect the latency when set. We prefer to take the time before
	 * calling thread_isolate() so that the wait time doesn't impact the
	 * measurement accuracy. However this requires to take care of negative
	 * times since tasks might be queued after we retrieve it.
	 */

	now_ns = now_mono_time();
	memset(tmp_activity, 0, sizeof(tmp_activity));

	thread_isolate();

	/* 1. global run queue */

#ifdef USE_THREAD
	for (thr = 0; thr < global.nbthread; thr++) {
		/* task run queue */
		rqnode = eb32_first(&ha_thread_ctx[thr].rqueue_shared);
		while (rqnode) {
			t = eb32_entry(rqnode, struct task, rq);
			entry = sched_activity_entry(tmp_activity, t->process, NULL);
			if (t->wake_date) {
				/* guard against tasks queued after now_ns was
				 * sampled, which would yield a negative delta.
				 */
				lat = now_ns - t->wake_date;
				if ((int64_t)lat > 0)
					entry->lat_time += lat;
			}
			entry->calls++;
			rqnode = eb32_next(rqnode);
		}
	}
#endif
	/* 2. all threads's local run queues */
	for (thr = 0; thr < global.nbthread; thr++) {
		/* task run queue */
		rqnode = eb32_first(&ha_thread_ctx[thr].rqueue);
		while (rqnode) {
			t = eb32_entry(rqnode, struct task, rq);
			entry = sched_activity_entry(tmp_activity, t->process, NULL);
			if (t->wake_date) {
				lat = now_ns - t->wake_date;
				if ((int64_t)lat > 0)
					entry->lat_time += lat;
			}
			entry->calls++;
			rqnode = eb32_next(rqnode);
		}

		/* shared tasklet list */
		list_for_each_entry(tl, mt_list_to_list(&ha_thread_ctx[thr].shared_tasklet_list), list) {
			t = (const struct task *)tl;
			entry = sched_activity_entry(tmp_activity, t->process, NULL);
			/* pure tasklets carry no wake_date, only latency of
			 * real tasks is accounted for.
			 */
			if (!TASK_IS_TASKLET(t) && t->wake_date) {
				lat = now_ns - t->wake_date;
				if ((int64_t)lat > 0)
					entry->lat_time += lat;
			}
			entry->calls++;
		}

		/* classful tasklets */
		for (queue = 0; queue < TL_CLASSES; queue++) {
			list_for_each_entry(tl, &ha_thread_ctx[thr].tasklets[queue], list) {
				t = (const struct task *)tl;
				entry = sched_activity_entry(tmp_activity, t->process, NULL);
				if (!TASK_IS_TASKLET(t) && t->wake_date) {
					lat = now_ns - t->wake_date;
					if ((int64_t)lat > 0)
						entry->lat_time += lat;
				}
				entry->calls++;
			}
		}
	}

	/* hopefully we're done */
	thread_release();

	chunk_reset(&trash);

	tot_calls = 0;
	for (i = 0; i < SCHED_ACT_HASH_BUCKETS; i++)
		tot_calls += tmp_activity[i].calls;

	/* sort most frequently seen entries first */
	qsort(tmp_activity, SCHED_ACT_HASH_BUCKETS, sizeof(tmp_activity[0]), cmp_sched_activity_calls);

	chunk_appendf(&trash, "Running tasks: %d (%d threads)\n"
		      "  function                     places   %%    lat_tot   lat_avg\n",
		      (int)tot_calls, global.nbthread);

	/* buckets are sorted by decreasing calls, so the first empty one ends
	 * the dump; this also guarantees tmp_activity[i].calls is non-zero in
	 * the divisions below. Note that if no task was seen at all, the loop
	 * body never runs, so tot_calls cannot be zero in the divisions.
	 */
	for (i = 0; i < SCHED_ACT_HASH_BUCKETS && tmp_activity[i].calls; i++) {
		chunk_reset(name_buffer);

		if (!tmp_activity[i].func)
			chunk_printf(name_buffer, "other");
		else
			resolve_sym_name(name_buffer, "", tmp_activity[i].func);

		/* reserve 35 chars for name+' '+#calls, knowing that longer names
		 * are often used for less often called functions.
		 */
		max = 35 - name_buffer->data;
		if (max < 1)
			max = 1;
		chunk_appendf(&trash, "  %s%*llu  %3d.%1d",
		              name_buffer->area, max, (unsigned long long)tmp_activity[i].calls,
		              (int)(100ULL * tmp_activity[i].calls / tot_calls),
		              (int)((1000ULL * tmp_activity[i].calls / tot_calls)%10));
		print_time_short(&trash, "   ", tmp_activity[i].lat_time, "");
		print_time_short(&trash, "   ", tmp_activity[i].lat_time / tmp_activity[i].calls, "\n");
	}

	if (applet_putchk(appctx, &trash) == -1) {
		/* failed, try again */
		return 0;
	}
	return 1;
}
1018
/* This function dumps some activity counters used by developers and support to
 * rule out some hypothesis during bug reports. It returns 0 if the output
 * buffer is full and it needs to be called again, otherwise non-zero. It dumps
 * everything at once in the buffer and is not designed to do it in multiple
 * passes.
 *
 * NOTE(maintenance): the dump is resumable and indexed by source line numbers:
 * actctx->line counts emitted lines, and each dump statement below lives in a
 * "case __LINE__:" label so that its index is its distance from base_line.
 * Holes (e.g. lines disabled by #ifdef, or plain comments) are harmless since
 * non-matching indices just emit nothing, but each "case __LINE__:" MUST stay
 * entirely on its own single line, otherwise two cases could collide.
 */
static int cli_io_handler_show_activity(struct appctx *appctx)
{
	struct stconn *sc = appctx_sc(appctx);
	struct show_activity_ctx *actctx = appctx->svcctx;
	int tgt = actctx->thr; // target thread, -1 for all, 0 for total only
	uint up_sec, up_usec;  // uptime, split into seconds/microseconds
	int base_line;         // __LINE__ anchor for the resumable switch below
	ullong up;             // uptime in nanoseconds

	/* FIXME: Don't watch the other side ! */
	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
		return 1;

	/* this macro is used below to dump values. The thread number is "thr",
	 * and runs from 0 to nbt-1 when values are printed using the formula.
	 * It first sums per-thread expression <x> into _tot and _v[], then
	 * prints <header>, the aggregated <formula> (when multiple threads and
	 * tgt <= 0), the individual per-thread values (when dumping all threads
	 * or one exact thread), and a closing bracket/newline.
	 */
#undef SHOW_VAL
#define SHOW_VAL(header, x, formula)					\
	do {								\
		unsigned int _v[MAX_THREADS];				\
		unsigned int _tot;					\
		const int _nbt = global.nbthread;			\
		int thr;						\
		_tot = thr = 0;						\
		do {							\
			_tot += _v[thr] = (x);				\
		} while (++thr < _nbt);					\
		for (thr = -2; thr <= _nbt; thr++) {			\
			if (thr == -2) {				\
				/* line header */			\
				chunk_appendf(&trash, "%s", header);	\
			}						\
			else if (thr == -1) {				\
				/* aggregate value only for multi-thread: all & 0 */ \
				if (_nbt > 1 && tgt <= 0)		\
					chunk_appendf(&trash, " %u%s",	\
						      (formula),	\
						      (tgt < 0) ?	\
						      " [" : "");	\
			}						\
			else if (thr < _nbt) {				\
				/* individual value only for all or exact value */ \
				if (tgt == -1 || tgt == thr+1)		\
					chunk_appendf(&trash, " %u",	\
						      _v[thr]);		\
			}						\
			else /* thr == _nbt */ {			\
				chunk_appendf(&trash, "%s\n",		\
					      (_nbt > 1 && tgt < 0) ?	\
					      " ]" : "");		\
			}						\
		}							\
	} while (0)

	/* retrieve uptime */
	up = now_ns - start_time_ns;
	up_sec = ns_to_sec(up);
	up_usec = (up / 1000U) % 1000000U;

	/* iterate over all dump lines. It happily skips over holes so it's
	 * not a problem not to have an exact match, we just need to have
	 * stable and consistent lines during a dump.
	 */
	base_line = __LINE__;
	do {
		chunk_reset(&trash);

		switch (actctx->line + base_line) {
		case __LINE__: chunk_appendf(&trash, "thread_id: %u (%u..%u)\n", tid + 1, 1, global.nbthread); break;
		case __LINE__: chunk_appendf(&trash, "date_now: %lu.%06lu\n", (ulong)date.tv_sec, (ulong)date.tv_usec); break;
		case __LINE__: chunk_appendf(&trash, "uptime_now: %u.%06u\n", up_sec, up_usec); break;
		case __LINE__: SHOW_VAL("ctxsw:", activity[thr].ctxsw, _tot); break;
		case __LINE__: SHOW_VAL("tasksw:", activity[thr].tasksw, _tot); break;
		case __LINE__: SHOW_VAL("empty_rq:", activity[thr].empty_rq, _tot); break;
		case __LINE__: SHOW_VAL("long_rq:", activity[thr].long_rq, _tot); break;
		case __LINE__: SHOW_VAL("loops:", activity[thr].loops, _tot); break;
		case __LINE__: SHOW_VAL("wake_tasks:", activity[thr].wake_tasks, _tot); break;
		case __LINE__: SHOW_VAL("wake_signal:", activity[thr].wake_signal, _tot); break;
		case __LINE__: SHOW_VAL("poll_io:", activity[thr].poll_io, _tot); break;
		case __LINE__: SHOW_VAL("poll_exp:", activity[thr].poll_exp, _tot); break;
		case __LINE__: SHOW_VAL("poll_drop_fd:", activity[thr].poll_drop_fd, _tot); break;
		case __LINE__: SHOW_VAL("poll_skip_fd:", activity[thr].poll_skip_fd, _tot); break;
		case __LINE__: SHOW_VAL("conn_dead:", activity[thr].conn_dead, _tot); break;
		case __LINE__: SHOW_VAL("stream_calls:", activity[thr].stream_calls, _tot); break;
		case __LINE__: SHOW_VAL("pool_fail:", activity[thr].pool_fail, _tot); break;
		case __LINE__: SHOW_VAL("buf_wait:", activity[thr].buf_wait, _tot); break;
		case __LINE__: SHOW_VAL("cpust_ms_tot:", activity[thr].cpust_total / 2, _tot); break;
		case __LINE__: SHOW_VAL("cpust_ms_1s:", read_freq_ctr(&activity[thr].cpust_1s) / 2, _tot); break;
		case __LINE__: SHOW_VAL("cpust_ms_15s:", read_freq_ctr_period(&activity[thr].cpust_15s, 15000) / 2, _tot); break;
		case __LINE__: SHOW_VAL("avg_cpu_pct:", (100 - ha_thread_ctx[thr].idle_pct), (_tot + _nbt/2) / _nbt); break;
		case __LINE__: SHOW_VAL("avg_loop_us:", swrate_avg(activity[thr].avg_loop_us, TIME_STATS_SAMPLES), (_tot + _nbt/2) / _nbt); break;
		case __LINE__: SHOW_VAL("accepted:", activity[thr].accepted, _tot); break;
		case __LINE__: SHOW_VAL("accq_pushed:", activity[thr].accq_pushed, _tot); break;
		case __LINE__: SHOW_VAL("accq_full:", activity[thr].accq_full, _tot); break;
#ifdef USE_THREAD
		case __LINE__: SHOW_VAL("accq_ring:", accept_queue_ring_len(&accept_queue_rings[thr]), _tot); break;
		case __LINE__: SHOW_VAL("fd_takeover:", activity[thr].fd_takeover, _tot); break;
#endif

#if defined(DEBUG_DEV)
		/* keep these ones at the end */
		case __LINE__: SHOW_VAL("ctr0:", activity[thr].ctr0, _tot); break;
		case __LINE__: SHOW_VAL("ctr1:", activity[thr].ctr1, _tot); break;
		case __LINE__: SHOW_VAL("ctr2:", activity[thr].ctr2, _tot); break;
#endif
		}
#undef SHOW_VAL

		if (applet_putchk(appctx, &trash) == -1) {
			/* buffer full, retry later */
			return 0;
		}
		/* line was dumped, let's commit it */
		actctx->line++;
	} while (actctx->line + base_line < __LINE__);

	/* dump complete */
	return 1;
}
1144
1145/* parse a "show activity" CLI request. Returns 0 if it needs to continue, 1 if it
1146 * wants to stop here. It sets a show_activity_ctx context where, if a specific
1147 * thread is requested, it puts the thread number into ->thr otherwise sets it to
1148 * -1.
1149 */
1150static int cli_parse_show_activity(char **args, char *payload, struct appctx *appctx, void *private)
1151{
1152 struct show_activity_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
1153
1154 if (!cli_has_level(appctx, ACCESS_LVL_OPER))
1155 return 1;
1156
1157 ctx->thr = -1; // show all by default
1158 if (*args[2])
1159 ctx->thr = atoi(args[2]);
1160
1161 if (ctx->thr < -1 || ctx->thr > global.nbthread)
1162 return cli_err(appctx, "Thread ID number must be between -1 and nbthread\n");
1163
1164 return 0;
1165}
1166
/* config keyword parsers: registers the global "profiling.*" directives.
 * The "profiling.memory" entry only exists when memory profiling support
 * was compiled in.
 */
static struct cfg_kw_list cfg_kws = {ILH, {
#ifdef USE_MEMORY_PROFILING
	{ CFG_GLOBAL, "profiling.memory", cfg_parse_prof_memory },
#endif
	{ CFG_GLOBAL, "profiling.tasks",  cfg_parse_prof_tasks  },
	{ 0, NULL, NULL }
}};

/* register the config keywords at startup (before config parsing) */
INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);

/* register cli keywords: each entry is { words, usage string, parse cb,
 * io handler cb [, release cb] }; the list is NULL-entry terminated.
 */
static struct cli_kw_list cli_kws = {{ },{
	{ { "set",  "profiling", NULL }, "set profiling <what> {auto|on|off}      : enable/disable resource profiling (tasks,memory)", cli_parse_set_profiling, NULL },
	{ { "show", "activity", NULL },  "show activity [-1|0|thread_num]         : show per-thread activity stats (for support/developers)", cli_parse_show_activity, cli_io_handler_show_activity, NULL },
	{ { "show", "profiling", NULL }, "show profiling [<what>|<#lines>|<opts>]*: show profiling state (all,status,tasks,memory)", cli_parse_show_profiling, cli_io_handler_show_profiling, NULL },
	{ { "show", "tasks", NULL },     "show tasks                              : show running tasks", NULL, cli_io_handler_show_tasks, NULL },
	{{},}
}};

/* register the CLI keywords at startup */
INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);