BUG/MAJOR: fd/threads, task/threads: ensure all spin locks are unlocked
Compute whether the fd or task should be locked once, before locking, and
reuse that result when determining whether to unlock.
Fixes a race condition added in 87d54a9a for fds, and b20aa9ee for tasks,
released in 1.9-dev4. When one thread modifies thread_mask to be a single
thread for a task or fd while a second thread has locked or is waiting on a
lock for that task or fd, the second thread will not unlock it. For FDs,
this is observable when a listener is polled by multiple threads, and is
closed while those threads have events pending. For tasks, this seems
possible when task_set_affinity is called, but I did not observe it.
This must be backported to 1.9.
diff --git a/src/fd.c b/src/fd.c
index 7fdd56b..314e10f 100644
--- a/src/fd.c
+++ b/src/fd.c
@@ -411,6 +411,7 @@
static inline void fdlist_process_cached_events(volatile struct fdlist *fdlist)
{
int fd, old_fd, e;
+ unsigned long locked;
for (old_fd = fd = fdlist->first; fd != -1; fd = fdtab[fd].cache.next) {
if (fd == -2) {
@@ -427,7 +428,8 @@
continue;
HA_ATOMIC_OR(&fd_cache_mask, tid_bit);
- if (atleast2(fdtab[fd].thread_mask) && HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) {
+ locked = atleast2(fdtab[fd].thread_mask);
+ if (locked && HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) {
activity[tid].fd_lock++;
continue;
}
@@ -442,13 +444,13 @@
fdtab[fd].ev |= FD_POLL_OUT;
if (fdtab[fd].iocb && fdtab[fd].owner && fdtab[fd].ev) {
- if (atleast2(fdtab[fd].thread_mask))
+ if (locked)
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].iocb(fd);
}
else {
fd_release_cache_entry(fd);
- if (atleast2(fdtab[fd].thread_mask))
+ if (locked)
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
}