MINOR: tasks: Use a bitfield to track tasks activity per-thread
A bitfield has been added to track whether there are runnable tasks for each
thread. When a task is woken up, the bits corresponding to its thread_mask are
set. When all tasks for a thread have been evaluated without any wakeup, the
thread is removed from the set of active ones by unsetting its tid_bit in the bitfield.
diff --git a/include/proto/task.h b/include/proto/task.h
index 9461b59..7639ed7 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -83,6 +83,7 @@
/* a few exported variables */
extern unsigned int nb_tasks; /* total number of tasks */
+extern unsigned long active_tasks_mask; /* Mask of threads with active tasks */
extern unsigned int tasks_run_queue; /* run queue size */
extern unsigned int tasks_run_queue_cur;
extern unsigned int nb_tasks_cur;
diff --git a/src/task.c b/src/task.c
index 6e7b9be..398e1f0 100644
--- a/src/task.c
+++ b/src/task.c
@@ -32,6 +32,7 @@
struct pool_head *pool2_notification;
unsigned int nb_tasks = 0;
+unsigned long active_tasks_mask = 0; /* Mask of threads with active tasks */
unsigned int tasks_run_queue = 0;
unsigned int tasks_run_queue_cur = 0; /* copy of the run queue size */
unsigned int nb_tasks_cur = 0; /* copy of the tasks count */
@@ -55,6 +56,7 @@
struct task *__task_wakeup(struct task *t)
{
tasks_run_queue++;
+ active_tasks_mask |= t->thread_mask;
t->rq.key = ++rqueue_ticks;
if (likely(t->nice)) {
@@ -187,6 +189,7 @@
struct task *local_tasks[16];
int local_tasks_count;
int final_tasks_count;
+
tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
nb_tasks_cur = nb_tasks;
max_processed = tasks_run_queue;
@@ -202,6 +205,7 @@
if (unlikely(global.nbthread <= 1)) {
/* when no lock is needed, this loop is much faster */
+ active_tasks_mask &= ~tid_bit;
rq_next = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
while (1) {
if (!rq_next) {
@@ -243,15 +247,17 @@
}
max_processed--;
- if (max_processed <= 0)
+ if (max_processed <= 0) {
+ active_tasks_mask |= tid_bit;
break;
+ }
}
return;
}
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
-
- do {
+ active_tasks_mask &= ~tid_bit;
+ while (1) {
/* Note: this loop is one of the fastest code path in
* the whole program. It should not be re-arranged
* without a good reason.
@@ -259,7 +265,6 @@
/* we have to restart looking up after every batch */
rq_next = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
-
for (local_tasks_count = 0; local_tasks_count < 16; local_tasks_count++) {
if (unlikely(!rq_next)) {
/* either we just started or we reached the end
@@ -318,8 +323,12 @@
else
task_queue(t);
}
- } while (max_processed > 0);
+ if (max_processed <= 0) {
+ active_tasks_mask |= tid_bit;
+ break;
+ }
+ }
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
}