BUILD: task: use list_to_mt_list() instead of casting list to mt_list
A few casts of list* to mt_list* were upsetting some old compilers
(the effect on other compilers is unknown). We had created
list_to_mt_list() precisely for this purpose, so let's use it instead
of these casts.
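
For illustration only, a minimal sketch of what such a conversion helper
could look like, assuming both structures are plain two-pointer nodes
with the same layout. The struct definitions and the union-based helper
below (to_mt_list) are simplified stand-ins for this commit message, not
HAProxy's actual list_to_mt_list() implementation:

    #include <stdio.h>

    /* Simplified stand-ins for the real structures: both are plain
     * two-pointer nodes, which is what makes the conversion safe.
     */
    struct list { struct list *n, *p; };
    struct mt_list { struct mt_list *next, *prev; };

    /* Hypothetical helper: converting through a union avoids the
     * explicit pointer cast that some old compilers complain about,
     * while still only reinterpreting the same address.
     */
    static inline struct mt_list *to_mt_list(struct list *l)
    {
            union {
                    struct list *list;
                    struct mt_list *mt_list;
            } u;

            u.list = l;
            return u.mt_list;
    }

    int main(void)
    {
            struct list node = { &node, &node };
            struct mt_list *m = to_mt_list(&node);

            /* same address, different static type */
            printf("%p %p\n", (void *)&node, (void *)m);
            return 0;
    }

The idea is that the callers below keep passing the address of the
embedded list member, and the helper (rather than an open-coded cast)
performs the type change in one well-defined place.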
diff --git a/include/haproxy/task.h b/include/haproxy/task.h
index b3fea6f..39e5121 100644
--- a/include/haproxy/task.h
+++ b/include/haproxy/task.h
@@ -400,7 +400,7 @@
*/
static inline void tasklet_remove_from_tasklet_list(struct tasklet *t)
{
- if (MT_LIST_DELETE((struct mt_list *)&t->list)) {
+ if (MT_LIST_DELETE(list_to_mt_list(&t->list))) {
_HA_ATOMIC_AND(&t->state, ~TASK_IN_LIST);
_HA_ATOMIC_DEC(&ha_thread_ctx[t->tid >= 0 ? t->tid : tid].rq_total);
}
@@ -556,7 +556,7 @@
/* Should only be called by the thread responsible for the tasklet */
static inline void tasklet_free(struct tasklet *tl)
{
- if (MT_LIST_DELETE((struct mt_list *)&tl->list))
+ if (MT_LIST_DELETE(list_to_mt_list(&tl->list)))
_HA_ATOMIC_DEC(&ha_thread_ctx[tl->tid >= 0 ? tl->tid : tid].rq_total);
#ifdef DEBUG_TASK
diff --git a/src/task.c b/src/task.c
index 3b351a7..ff98390 100644
--- a/src/task.c
+++ b/src/task.c
@@ -89,7 +89,7 @@
/* Beware: tasks that have never run don't have their ->list empty yet! */
MT_LIST_APPEND(&ha_thread_ctx[thr].shared_tasklet_list,
- (struct mt_list *)&((struct tasklet *)t)->list);
+ list_to_mt_list(&((struct tasklet *)t)->list));
_HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
_HA_ATOMIC_INC(&ha_thread_ctx[thr].tasks_in_list);
if (sleeping_thread_mask & (1UL << thr)) {
@@ -128,7 +128,7 @@
if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_IN_LIST | TASK_KILLED)) {
thr = t->tid > 0 ? t->tid: tid;
MT_LIST_APPEND(&ha_thread_ctx[thr].shared_tasklet_list,
- (struct mt_list *)&t->list);
+ list_to_mt_list(&t->list));
_HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
if (sleeping_thread_mask & (1UL << thr)) {
_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
@@ -172,7 +172,7 @@
_HA_ATOMIC_INC(&th_ctx->rq_total);
} else {
/* this tasklet runs on a specific thread. */
- MT_LIST_APPEND(&ha_thread_ctx[thr].shared_tasklet_list, (struct mt_list *)&tl->list);
+ MT_LIST_APPEND(&ha_thread_ctx[thr].shared_tasklet_list, list_to_mt_list(&tl->list));
_HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
if (sleeping_thread_mask & (1UL << thr)) {
_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));