MINOR: tasks/activity: report the context switch and task wakeup rates
Seeing this is particularly useful to spot runaway tasks. The context
switch rate covers all tasklet calls (tasks and I/O handlers) while the
task wakeups only covers tasks picked from the run queue to be executed.
High values there will indicate either intense traffic or a bug that
makes a task go wild.
diff --git a/src/task.c b/src/task.c
index de6eda1..04476fe 100644
--- a/src/task.c
+++ b/src/task.c
@@ -20,10 +20,11 @@
#include <eb32sctree.h>
#include <eb32tree.h>
+#include <proto/fd.h>
+#include <proto/freq_ctr.h>
#include <proto/proxy.h>
#include <proto/stream.h>
#include <proto/task.h>
-#include <proto/fd.h>
DECLARE_POOL(pool_head_task, "task", sizeof(struct task));
DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));
@@ -278,6 +279,8 @@
struct eb32sc_node *lrq = NULL; // next local run queue entry
struct eb32sc_node *grq = NULL; // next global run queue entry
struct task *t;
+ int to_process;
+ int wakeups;
int max_processed;
if (!(active_tasks_mask & tid_bit)) {
@@ -292,6 +295,9 @@
if (likely(niced_tasks))
max_processed = (max_processed + 3) / 4;
+ to_process = max_processed;
+ wakeups = 0;
+
/* Note: the grq lock is always held when grq is not null */
while (task_per_thread[tid].task_list_size < max_processed) {
@@ -344,6 +350,7 @@
/* And add it to the local task list */
task_insert_into_tasklet_list(t);
+ wakeups++;
}
/* release the rqueue lock */
@@ -419,6 +426,11 @@
_HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
activity[tid].long_rq++;
}
+
+ if (wakeups)
+ update_freq_ctr(&activity[tid].tasks_rate, wakeups);
+ if (to_process - max_processed)
+ update_freq_ctr(&activity[tid].ctxsw_rate, to_process - max_processed);
}
/*