MINOR: activity: report the number of times poll() reports I/O
The "show activity" output mentions a number of indicators to explain
wake up reasons but doesn't have the number of times poll() sees some
I/O. And given that multiple events can happen simultaneously, it's
not always possible to deduce this metric by subtracting.
This patch adds a new "poll_io" counter that allows one to see how
often poll() returns with at least one active FD. This should help
detect stuck events and measure various ratios of poll sub-metrics.
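As an illustrative note (not part of the patch): every poller applies the
same pattern, i.e. bump the per-thread counter once per return of the
polling syscall that reports at least one ready FD, before iterating over
the events. The standalone sketch below approximates this with plain
poll() and hypothetical simplified types; the real code lives in the
per-poller hunks that follow.

#include <poll.h>

/* Hypothetical, simplified per-thread counters mirroring struct activity */
struct thread_activity {
	unsigned int loops;   /* iterations of the polling loop */
	unsigned int poll_io; /* returns with at least one active FD */
};

static struct thread_activity activity[1]; /* one slot per thread in reality */

/* One polling iteration: poll_io is bumped once per wakeup that reports
 * I/O, regardless of how many FDs are ready, so it counts wakeups rather
 * than events.
 */
static int poll_once(int tid, struct pollfd *fds, nfds_t nbfd, int timeout_ms)
{
	int status = poll(fds, nbfd, timeout_ms);

	activity[tid].loops++;
	if (status > 0)
		activity[tid].poll_io++;

	return status;
}

Counting wakeups rather than ready FDs is what makes poll_io directly
comparable to loops and to the other wake_* / poll_* indicators.
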
diff --git a/include/haproxy/activity-t.h b/include/haproxy/activity-t.h
index 3020806..4e2f9f0 100644
--- a/include/haproxy/activity-t.h
+++ b/include/haproxy/activity-t.h
@@ -40,6 +40,7 @@
unsigned int loops; // complete loops in run_poll_loop()
unsigned int wake_tasks; // active tasks prevented poll() from sleeping
unsigned int wake_signal; // pending signal prevented poll() from sleeping
+ unsigned int poll_io; // number of times poll() reported I/O events
unsigned int poll_exp; // number of times poll() sees an expired timeout (includes wake_*)
unsigned int poll_drop; // poller dropped a dead FD from the update list
unsigned int poll_dead; // poller woke up with a dead FD
@@ -51,7 +52,7 @@
unsigned int empty_rq; // calls to process_runnable_tasks() with nothing for the thread
unsigned int long_rq; // process_runnable_tasks() left with tasks in the run queue
unsigned int cpust_total; // sum of half-ms stolen per thread
- /* two unused entries left before end of first cache line */
+ /* one unused entry left before end of first cache line */
ALWAYS_ALIGN(64);

struct freq_ctr cpust_1s; // avg amount of half-ms stolen over last second
diff --git a/src/cli.c b/src/cli.c
index 22fc771..1939ec3 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -1129,6 +1129,7 @@
chunk_appendf(&trash, "loops:"); SHOW_TOT(thr, activity[thr].loops);
chunk_appendf(&trash, "wake_tasks:"); SHOW_TOT(thr, activity[thr].wake_tasks);
chunk_appendf(&trash, "wake_signal:"); SHOW_TOT(thr, activity[thr].wake_signal);
+ chunk_appendf(&trash, "poll_io:"); SHOW_TOT(thr, activity[thr].poll_io);
chunk_appendf(&trash, "poll_exp:"); SHOW_TOT(thr, activity[thr].poll_exp);
chunk_appendf(&trash, "poll_drop:"); SHOW_TOT(thr, activity[thr].poll_drop);
chunk_appendf(&trash, "poll_dead:"); SHOW_TOT(thr, activity[thr].poll_dead);
diff --git a/src/ev_epoll.c b/src/ev_epoll.c
index 2501dd5..e5437a2 100644
--- a/src/ev_epoll.c
+++ b/src/ev_epoll.c
@@ -180,8 +180,10 @@
status = epoll_wait(epoll_fd[tid], epoll_events, global.tune.maxpollevents, timeout);
tv_update_date(timeout, status);

- if (status)
+ if (status) {
+ activity[tid].poll_io++;
break;
+ }
if (timeout || !wait_time)
break;
if (signal_queue_len || wake)
diff --git a/src/ev_evports.c b/src/ev_evports.c
index 6b14ea0..50d9a1e 100644
--- a/src/ev_evports.c
+++ b/src/ev_evports.c
@@ -207,6 +207,9 @@
thread_harmless_end();
+ if (nevlist > 0)
+ activity[tid].poll_io++;
+
for (i = 0; i < nevlist; i++) {
unsigned int n = 0;
int events, rebind_events;
diff --git a/src/ev_kqueue.c b/src/ev_kqueue.c
index 3a0b0a3..9df987a 100644
--- a/src/ev_kqueue.c
+++ b/src/ev_kqueue.c
@@ -161,8 +161,10 @@
&timeout_ts); // const struct timespec *timeout
tv_update_date(timeout, status);

- if (status)
+ if (status) {
+ activity[tid].poll_io++;
break;
+ }
if (timeout || !wait_time)
break;
if (signal_queue_len || wake)
diff --git a/src/ev_poll.c b/src/ev_poll.c
index 03d32b0..69368ce 100644
--- a/src/ev_poll.c
+++ b/src/ev_poll.c
@@ -211,6 +211,9 @@
thread_harmless_end();
+ if (status > 0)
+ activity[tid].poll_io++;
+
for (count = 0; status > 0 && count < nbfd; count++) {
unsigned int n;
int e = poll_events[count].revents;
diff --git a/src/ev_select.c b/src/ev_select.c
index adc8160..d79f09b 100644
--- a/src/ev_select.c
+++ b/src/ev_select.c
@@ -188,6 +188,8 @@
if (status <= 0)
return;

+ activity[tid].poll_io++;
+
for (fds = 0; (fds * BITS_PER_INT) < maxfd; fds++) {
if ((((int *)(tmp_evts[DIR_RD]))[fds] | ((int *)(tmp_evts[DIR_WR]))[fds]) == 0)
continue;
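
As a usage note (illustrative only, not part of the patch): once poll_io
is exported by "show activity", the share of loop iterations that actually
saw I/O can be computed directly instead of being inferred by subtraction,
e.g. with a trivial helper such as:

/* Hypothetical helper: fraction of polling-loop iterations that reported
 * I/O, using the loops and poll_io counters from "show activity".
 */
static inline double io_wakeup_ratio(unsigned int poll_io, unsigned int loops)
{
	return loops ? (double)poll_io / (double)loops : 0.0;
}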