REORG: thread/clock: move the clock parts of thread_info to thread_ctx
The "thread_info" name was initially chosen to store all info about
threads but since we now have a separate per-thread context, there is
no point keeping some of its elements in the thread_info struct.
As such, this patch moves prev_cpu_time, prev_mono_time and idle_pct to
thread_ctx, into the thread context, with the scheduler parts. Instead
of accessing them via "ti->" we now access them via "th_ctx->", which
makes more sense as they're totally dynamic, and will be required for
future evolutions. There's no room problem for now, the structure still
has 84 bytes available at the end.
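
For clarity, the resulting access pattern looks like this (a minimal
sketch assembled from the hunks below; these are isolated fragments,
not complete functions):

    /* same-thread accesses now go through th_ctx instead of ti: */
    th_ctx->prev_cpu_time  = now_cpu_time();
    th_ctx->prev_mono_time = now_mono_time();
    th_ctx->idle_pct       = 100;

    /* cross-thread accesses index ha_thread_ctx[] instead of
     * ha_thread_info[]:
     */
    total += HA_ATOMIC_LOAD(&ha_thread_ctx[thr].idle_pct);
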
diff --git a/include/haproxy/tinfo-t.h b/include/haproxy/tinfo-t.h
index 3fde269..afbf191 100644
--- a/include/haproxy/tinfo-t.h
+++ b/include/haproxy/tinfo-t.h
@@ -42,9 +42,6 @@
* disabled, it contains the same info for the single running thread.
*/
struct thread_info {
- uint64_t prev_cpu_time; /* previous per thread CPU time */
- uint64_t prev_mono_time; /* previous system wide monotonic time */
- unsigned int idle_pct; /* idle to total ratio over last sample (percent) */
unsigned int flags; /* thread info flags, TI_FL_* */
#ifdef CONFIG_HAP_POOLS
@@ -82,6 +79,9 @@
struct mt_list shared_tasklet_list; /* Tasklet to be run, woken up by other threads */
unsigned int rq_total; /* total size of the run queue, prio_tree + tasklets */
int tasks_in_list; /* Number of tasks in the per-thread tasklets list */
+ uint64_t prev_cpu_time; /* previous per thread CPU time */
+ uint64_t prev_mono_time; /* previous system wide monotonic time */
+ uint idle_pct; /* idle to total ratio over last sample (percent) */
ALWAYS_ALIGN(128);
};
diff --git a/src/clock.c b/src/clock.c
index 33d7b4d..3b9e9ff 100644
--- a/src/clock.c
+++ b/src/clock.c
@@ -247,7 +247,7 @@
now = after_poll = before_poll = date;
global_now = ((ullong)date.tv_sec << 32) + (uint)date.tv_usec;
global_now_ms = now.tv_sec * 1000 + now.tv_usec / 1000;
- ti->idle_pct = 100;
+ th_ctx->idle_pct = 100;
clock_update_date(0, 1);
}
@@ -264,7 +264,7 @@
old_now = _HA_ATOMIC_LOAD(&global_now);
now.tv_sec = old_now >> 32;
now.tv_usec = (uint)old_now;
- ti->idle_pct = 100;
+ th_ctx->idle_pct = 100;
clock_update_date(0, 1);
}
@@ -278,7 +278,7 @@
for (thr = 0; thr < MAX_THREADS; thr++) {
if (!(all_threads_mask & (1UL << thr)))
continue;
- total += HA_ATOMIC_LOAD(&ha_thread_info[thr].idle_pct);
+ total += HA_ATOMIC_LOAD(&ha_thread_ctx[thr].idle_pct);
rthr++;
}
return rthr ? total / rthr : 0;
@@ -310,7 +310,7 @@
if (samp_time < 500000)
return;
- HA_ATOMIC_STORE(&ti->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
+ HA_ATOMIC_STORE(&th_ctx->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
idle_time = samp_time = 0;
}
@@ -322,8 +322,8 @@
void clock_leaving_poll(int timeout, int interrupted)
{
clock_measure_idle();
- ti->prev_cpu_time = now_cpu_time();
- ti->prev_mono_time = now_mono_time();
+ th_ctx->prev_cpu_time = now_cpu_time();
+ th_ctx->prev_mono_time = now_mono_time();
}
/* Collect date and time information before calling poll(). This will be used
@@ -346,9 +346,9 @@
new_cpu_time = now_cpu_time();
new_mono_time = now_mono_time();
- if (ti->prev_cpu_time && ti->prev_mono_time) {
- new_cpu_time -= ti->prev_cpu_time;
- new_mono_time -= ti->prev_mono_time;
+ if (th_ctx->prev_cpu_time && th_ctx->prev_mono_time) {
+ new_cpu_time -= th_ctx->prev_cpu_time;
+ new_mono_time -= th_ctx->prev_mono_time;
stolen = new_mono_time - new_cpu_time;
if (unlikely(stolen >= 500000)) {
stolen /= 500000;
diff --git a/src/compression.c b/src/compression.c
index 7a7d900..8095ecb 100644
--- a/src/compression.c
+++ b/src/compression.c
@@ -360,7 +360,7 @@
/* Verify compression rate limiting and CPU usage */
if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) || /* rate */
- (ti->idle_pct < compress_min_idle)) { /* idle */
+ (th_ctx->idle_pct < compress_min_idle)) { /* idle */
if (comp_ctx->cur_lvl > 0)
strm->level = --comp_ctx->cur_lvl;
}
@@ -618,7 +618,7 @@
/* compression limit */
if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) || /* rate */
- (ti->idle_pct < compress_min_idle)) { /* idle */
+ (th_ctx->idle_pct < compress_min_idle)) { /* idle */
/* decrease level */
if (comp_ctx->cur_lvl > 0) {
comp_ctx->cur_lvl--;
diff --git a/src/debug.c b/src/debug.c
index 5144094..aafa378 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -150,7 +150,7 @@
void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
{
unsigned long thr_bit = 1UL << thr;
- unsigned long long p = ha_thread_info[thr].prev_cpu_time;
+ unsigned long long p = ha_thread_ctx[thr].prev_cpu_time;
unsigned long long n = now_cpu_time_thread(thr);
int stuck = !!(ha_thread_info[thr].flags & TI_FL_STUCK);
diff --git a/src/flt_http_comp.c b/src/flt_http_comp.c
index 774b982..9d8c3fa 100644
--- a/src/flt_http_comp.c
+++ b/src/flt_http_comp.c
@@ -564,7 +564,7 @@
goto fail;
/* limit cpu usage */
- if (ti->idle_pct < compress_min_idle)
+ if (th_ctx->idle_pct < compress_min_idle)
goto fail;
/* initialize compression */
diff --git a/src/wdt.c b/src/wdt.c
index 96db84b..b789fbe 100644
--- a/src/wdt.c
+++ b/src/wdt.c
@@ -71,7 +71,7 @@
if (thr < 0 || thr >= global.nbthread)
break;
- p = ha_thread_info[thr].prev_cpu_time;
+ p = ha_thread_ctx[thr].prev_cpu_time;
n = now_cpu_time_thread(thr);
/* not yet reached the deadline of 1 sec */