BUG/MEDIUM: cli: make "show cli sockets" really yield
This command was introduced in 1.8 with commit eceddf722 ("MEDIUM: cli:
'show cli sockets' list the CLI sockets") but its yielding doesn't work.
Each time it enters, it restarts from the last bind_conf but enumerates
all listening sockets again, thus it loops forever. The risk that it
happens in the field is low, but it easily triggers on port ranges after
400-500 sockets, depending on the length of their addresses:
global
stats socket /tmp/sock1 level admin
stats socket 192.168.8.176:30000-31000 level operator
$ socat /tmp/sock1 - <<< "show cli sockets"
(...)
ipv4@192.168.8.176:30426 operator all
ipv4@192.168.8.176:30427 operator all
ipv4@192.168.8.176:30428 operator all
ipv4@192.168.8.176:30000 operator all
ipv4@192.168.8.176:30001 operator all
ipv4@192.168.8.176:30002 operator all
^C
This patch adds the minimally needed restart point for the listener so
that it can easily be backported. Some more cleanup is needed though.
(cherry picked from commit 241a006d798b4305a64746f8da0b9fdeade533a5)
[cf: Context adjustment (cs -> si)]
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
(cherry picked from commit 2c9aed755365bec63804fdbadad0e6db1aa51b00)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/cli.c b/src/cli.c
index b410c78..c414382 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -1475,11 +1475,11 @@
/*
* CLI IO handler for `show cli sockets`.
- * Uses ctx.cli.p0 to store the restart pointer.
+ * Uses ctx.cli.p0 to store the bind_conf pointer, and cli.p1 for the listener.
*/
static int cli_io_handler_show_cli_sock(struct appctx *appctx)
{
- struct bind_conf *bind_conf;
+ struct bind_conf *bind_conf = appctx->ctx.cli.p0;
struct stream_interface *si = appctx->owner;
chunk_reset(&trash);
@@ -1496,23 +1496,16 @@
case STAT_ST_LIST:
if (global.cli_fe) {
- list_for_each_entry(bind_conf, &global.cli_fe->conf.bind, by_fe) {
- struct listener *l;
+ if (!bind_conf)
+ bind_conf = LIST_ELEM(global.cli_fe->conf.bind.n, typeof(bind_conf), by_fe);
- /*
- * get the latest dumped node in appctx->ctx.cli.p0
- * if the current node is the first of the list
- */
+ list_for_each_entry_from(bind_conf, &global.cli_fe->conf.bind, by_fe) {
+ struct listener *l = appctx->ctx.cli.p1;
- if (appctx->ctx.cli.p0 &&
- &bind_conf->by_fe == (&global.cli_fe->conf.bind)->n) {
- /* change the current node to the latest dumped and continue the loop */
- bind_conf = LIST_ELEM(appctx->ctx.cli.p0, typeof(bind_conf), by_fe);
- continue;
- }
-
- list_for_each_entry(l, &bind_conf->listeners, by_bind) {
+ if (!l)
+ l = LIST_ELEM(bind_conf->listeners.n, typeof(l), by_bind);
+ list_for_each_entry_from(l, &bind_conf->listeners, by_bind) {
char addr[46];
char port[6];
@@ -1562,11 +1555,13 @@
}
if (ci_putchk(si_ic(si), &trash) == -1) {
+ /* buffer full, we must yield */
+ appctx->ctx.cli.p0 = bind_conf;
+ appctx->ctx.cli.p1 = l;
si_rx_room_blk(si);
return 0;
}
}
- appctx->ctx.cli.p0 = &bind_conf->by_fe; /* store the latest list node dumped */
}
}
/* fall through */