MEDIUM: various: Use __ha_barrier_atomic* when relevant.
When ordering accesses to data that was just modified by an HA_ATOMIC*
operation, use the __ha_barrier_atomic* variants instead of the plain
__ha_barrier* ones. On x86, atomic read-modify-write operations are
lock-prefixed and already act as full memory barriers, so the atomic
variants avoid emitting an otherwise unneeded fence.
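
For reference, a minimal sketch of the difference on x86_64, assuming the
HA_ATOMIC* macros compile down to lock-prefixed read-modify-write
instructions (the real definitions live in include/common/hathreads.h and
may differ in detail):

    /* Plain store barrier: must emit a real store fence. */
    static inline void __ha_barrier_store(void)
    {
            __asm __volatile("sfence" ::: "memory");
    }

    /* Store barrier used right after an atomic read-modify-write: the
     * preceding locked instruction already orders memory accesses, so a
     * compiler barrier is enough and no fence instruction is emitted.
     */
    static inline void __ha_barrier_atomic_store(void)
    {
            __asm __volatile("" ::: "memory");
    }

    /* Same idea on the load side. */
    static inline void __ha_barrier_atomic_load(void)
    {
            __asm __volatile("" ::: "memory");
    }

This is exactly the pattern changed below: every converted call site
follows an HA_ATOMIC_XCHG, HA_ATOMIC_CAS, HA_ATOMIC_OR or HA_ATOMIC_AND,
so the cheaper variant is sufficient there.
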
diff --git a/include/common/mini-clist.h b/include/common/mini-clist.h
index 62a62d7..074176a 100644
--- a/include/common/mini-clist.h
+++ b/include/common/mini-clist.h
@@ -192,7 +192,7 @@
n = HA_ATOMIC_XCHG(&(lh)->n, LLIST_BUSY); \
if (n == LLIST_BUSY) \
continue; \
- __ha_barrier_store(); \
+ __ha_barrier_atomic_store(); \
p = HA_ATOMIC_XCHG(&n->p, LLIST_BUSY); \
if (p == LLIST_BUSY) { \
(lh)->n = n; \
diff --git a/include/common/xref.h b/include/common/xref.h
index a6291f5..48bc07a 100644
--- a/include/common/xref.h
+++ b/include/common/xref.h
@@ -32,7 +32,7 @@
/* Get the local pointer to the peer. */
local = HA_ATOMIC_XCHG(&xref->peer, XREF_BUSY);
- __ha_barrier_store();
+ __ha_barrier_atomic_store();
/* If the local pointer is NULL, the peer no longer exists. */
if (local == NULL) {
diff --git a/src/fd.c b/src/fd.c
index 581c5aa..cb9df1f 100644
--- a/src/fd.c
+++ b/src/fd.c
@@ -203,7 +203,7 @@
goto done;
if (!HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2))
goto redo_next;
- __ha_barrier_store();
+ __ha_barrier_atomic_store();
new = fd;
redo_last:
@@ -292,7 +292,7 @@
if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(fd, off), &prev, -2)))
goto lock_self_prev;
#endif
- __ha_barrier_store();
+ __ha_barrier_atomic_store();
/* Now, lock the entries of our neighbours */
if (likely(prev != -1)) {
diff --git a/src/task.c b/src/task.c
index 826e212..d7c3e05 100644
--- a/src/task.c
+++ b/src/task.c
@@ -122,7 +122,7 @@
#ifdef USE_THREAD
if (root == &rqueue) {
HA_ATOMIC_OR(&global_tasks_mask, t->thread_mask);
- __ha_barrier_store();
+ __ha_barrier_atomic_store();
}
#endif
old_active_mask = active_tasks_mask;
@@ -401,7 +401,7 @@
}
if (!(global_tasks_mask & tid_bit) && task_per_thread[tid].rqueue_size == 0) {
HA_ATOMIC_AND(&active_tasks_mask, ~tid_bit);
- __ha_barrier_load();
+ __ha_barrier_atomic_load();
if (global_tasks_mask & tid_bit)
HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
}
@@ -413,7 +413,7 @@
t = (struct task *)LIST_ELEM(task_per_thread[tid].task_list.n, struct tasklet *, list);
state = HA_ATOMIC_XCHG(&t->state, TASK_RUNNING);
- __ha_barrier_store();
+ __ha_barrier_atomic_store();
task_remove_from_task_list(t);
ctx = t->context;