MEDIUM: fd/threads: Make sure we don't miss an fd cache entry.

An fd cache entry might be removed and re-added at the end of the list
while another thread is parsing it; if that happens, that thread may
miss fd cache entries. To avoid this, add a new field to struct fdtab,
"added_mask", which contains a mask of the potentially affected
threads. When a thread sees its bit set there, it also sets its bit in
fd_cache_mask, so that it does not wait in poll() while it may still
have more work to do.
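
For illustration, here is a minimal sketch of the consumer side of
that handshake, i.e. what a thread scanning the cache would check
before letting itself sleep in poll(). It assumes "added_mask" is an
unsigned long bitmask in struct fdtab and reuses the existing tid_bit
and HA_ATOMIC_* macros; the exact call site is not shown in the hunks
below, so treat this as a sketch rather than the patch itself:

    /* Sketch only: if another thread re-added this entry behind our
     * back, it flagged us in added_mask. Clear our bit there and
     * re-arm our bit in fd_cache_mask so that we rescan the cache
     * instead of waiting in poll().
     */
    if (fdtab[fd].added_mask & tid_bit) {
        HA_ATOMIC_AND(&fdtab[fd].added_mask, ~tid_bit);
        HA_ATOMIC_OR(&fd_cache_mask, tid_bit);
    }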
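
The hunks below also drop the _GET_NEXT()/_GET_PREV() macros in favour
of dereferencing fdtab[fd].cache.next and fdtab[fd].cache.prev
directly. The value encoding of the "next" link, spelled out in the
long comment that the second hunk shortens, is worth keeping in mind
while reading the CAS loops ("prev" uses -2 as its lock value in the
same way):

    /* fdtab[fd].cache.next encoding, per the removed comment:
     *   <= -3 : not in the cache; removal stores -(next + 4), so an
     *           entry whose next was 5 ends up at -9, and the last
     *           entry (next == -1) ends up at -3
     *      -2 : entry locked by a concurrent add/remove
     *      -1 : in the cache, and the last element of the list
     *   >=  0 : fd of the next entry in the cache
     */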
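
Finally, fd_rm_from_fd_list() locks both links at once where the
platform allows it: it snapshots fdtab[fd].cache into a local struct
fdlist_entry and swaps {next, prev} to {-2, -2} with one double-width
CAS, falling back to two single-word CAS loops otherwise. A
hypothetical, self-contained illustration of the same idea with C11
atomics (the lock_entry() helper is invented here; the real code uses
HA_ATOMIC_CAS()/__ha_cas_dw()):

    #include <stdatomic.h>
    #include <stdint.h>

    struct fdlist_entry { int next; int prev; };

    /* Lock both links of one entry with a single 8-byte CAS. Returns
     * 1 on success, 0 if the entry cannot be locked right now (the
     * caller decides whether to retry, as fd_rm_from_fd_list() does).
     */
    static int lock_entry(_Atomic uint64_t *cache)
    {
        union { struct fdlist_entry e; uint64_t u; } cur, locked;

        cur.u = atomic_load(cache);
        if (cur.e.next <= -3)
            return 0;           /* not in the cache, nothing to do */
        if (cur.e.next == -2)
            return 0;           /* already locked by another thread */
        locked.e.next = locked.e.prev = -2;
        return atomic_compare_exchange_strong(cache, &cur.u, locked.u);
    }
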
diff --git a/include/proto/fd.h b/include/proto/fd.h
index 595af90..535e691 100644
--- a/include/proto/fd.h
+++ b/include/proto/fd.h
@@ -105,10 +105,6 @@
 	fd_updt[fd_nbupdt++] = fd;
 }
 
-
-#define _GET_NEXT(fd) fdtab[fd].fdcache_entry.next
-#define _GET_PREV(fd) fdtab[fd].fdcache_entry.prev
-
 static inline void fd_add_to_fd_list(volatile struct fdlist *list, int fd)
 {
 	int next;
@@ -117,16 +113,11 @@
 	int last;
 
 redo_next:
-	next = _GET_NEXT(fd);
-	/*
-	 * Check that we're not already in the cache, and if not, lock us.
-	 * <= -3 means not in the cache, -2 means locked, -1 means we're
-	 * in the cache, and the last element, >= 0 gives the FD of the next
-	 * in the cache.
-	 */
+	next = fdtab[fd].cache.next;
+	/* Check that we're not already in the cache, and if not, lock us. */
 	if (next >= -2)
 		goto done;
-	if (!HA_ATOMIC_CAS(&_GET_NEXT(fd), &next, -2))
+	if (!HA_ATOMIC_CAS(&fdtab[fd].cache.next, &next, -2))
 		goto redo_next;
 	__ha_barrier_store();
 redo_last:
@@ -137,7 +128,7 @@
 	if (unlikely(last == -1)) {
 		/* list is empty, try to add ourselves alone so that list->last=fd */
 
-		_GET_PREV(fd) = last;
+		fdtab[fd].cache.prev = last;
 
 		/* Make sure the "prev" store is visible before we update the last entry */
 		__ha_barrier_store();
@@ -150,14 +141,14 @@
 		/* since we're alone at the end of the list and still locked(-2),
 		 * we know no one tried to add past us. Mark the end of the list.
 		 */
-		_GET_NEXT(fd) = -1;
+		fdtab[fd].cache.next = -1;
 		goto done; /* We're done ! */
 	} else {
 		/* non-empty list, add past the tail */
 		do {
 			new = fd;
 			old = -1;
-			_GET_PREV(fd) = last;
+			fdtab[fd].cache.prev = last;
 
 			__ha_barrier_store();
 
@@ -165,7 +156,7 @@
 			 * The CAS will only succeed if its next is -1,
 			 * which means it's in the cache, and the last element.
 			 */
-			if (likely(HA_ATOMIC_CAS(&_GET_NEXT(last), &old, new)))
+			if (likely(HA_ATOMIC_CAS(&fdtab[last].cache.next, &old, new)))
 				break;
 			goto redo_last;
 		} while (1);
@@ -178,7 +169,7 @@
 	if (unlikely(!HA_ATOMIC_CAS(&list->last, &last, fd)))
 		goto redo_fd_cache;
 	__ha_barrier_store();
-	_GET_NEXT(fd) = -1;
+	fdtab[fd].cache.next = -1;
 	__ha_barrier_store();
 done:
 	return;
@@ -202,15 +193,14 @@
 #endif
 	int old;
 	int new = -2;
-	volatile int prev;
-	volatile int next;
+	int prev;
+	int next;
 	int last;
 
 lock_self:
 #if (defined(HA_CAS_IS_8B) || defined(HA_HAVE_CAS_DW))
 	next_list.next = next_list.prev = -2;
-	cur_list.prev = _GET_PREV(fd);
-	cur_list.next = _GET_NEXT(fd);
+	cur_list = fdtab[fd].cache;
 	/* First, attempt to lock our own entries */
 	do {
 		/* The FD is not in the FD cache, give up */
@@ -220,9 +210,9 @@
 			goto lock_self;
 	} while (
 #ifdef HA_CAS_IS_8B
-	    unlikely(!HA_ATOMIC_CAS(((void **)(void *)&_GET_NEXT(fd)), ((void **)(void *)&cur_list), (*(void **)(void *)&next_list))))
+	    unlikely(!HA_ATOMIC_CAS(((void **)(void *)&fdtab[fd].cache.next), ((void **)(void *)&cur_list), (*(void **)(void *)&next_list))))
 #else
-	    unlikely(!__ha_cas_dw((void *)&_GET_NEXT(fd), (void *)&cur_list, (void *)&next_list)))
+	    unlikely(!__ha_cas_dw((void *)&fdtab[fd].cache.next, (void *)&cur_list, (void *)&next_list)))
 #endif
 	    ;
 	next = cur_list.next;
@@ -230,18 +220,18 @@
 
 #else
 lock_self_next:
-	next = _GET_NEXT(fd);
+	next = fdtab[fd].cache.next;
 	if (next == -2)
 		goto lock_self_next;
 	if (next <= -3)
 		goto done;
-	if (unlikely(!HA_ATOMIC_CAS(&_GET_NEXT(fd), &next, -2)))
+	if (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].cache.next, &next, -2)))
 		goto lock_self_next;
 lock_self_prev:
-	prev = _GET_PREV(fd);
+	prev = fdtab[fd].cache.prev;
 	if (prev == -2)
 		goto lock_self_prev;
-	if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(fd), &prev, -2)))
+	if (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].cache.prev, &prev, -2)))
 		goto lock_self_prev;
 #endif
 	__ha_barrier_store();
@@ -251,14 +241,14 @@
 redo_prev:
 		old = fd;
 
-		if (unlikely(!HA_ATOMIC_CAS(&_GET_NEXT(prev), &old, new))) {
+		if (unlikely(!HA_ATOMIC_CAS(&fdtab[prev].cache.next, &old, new))) {
 			if (unlikely(old == -2)) {
 				/* Neighbour already locked, give up and
 				 * retry once it's done
 				 */
-				_GET_PREV(fd) = prev;
+				fdtab[fd].cache.prev = prev;
 				__ha_barrier_store();
-				_GET_NEXT(fd) = next;
+				fdtab[fd].cache.next = next;
 				__ha_barrier_store();
 				goto lock_self;
 			}
@@ -268,18 +258,18 @@
 	if (likely(next != -1)) {
 redo_next:
 		old = fd;
-		if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(next), &old, new))) {
+		if (unlikely(!HA_ATOMIC_CAS(&fdtab[next].cache.prev, &old, new))) {
 			if (unlikely(old == -2)) {
 				/* Neighbour already locked, give up and
 				 * retry once it's done
 				 */
 				if (prev != -1) {
-					_GET_NEXT(prev) = fd;
+					fdtab[prev].cache.next = fd;
 					__ha_barrier_store();
 				}
-				_GET_PREV(fd) = prev;
+				fdtab[fd].cache.prev = prev;
 				__ha_barrier_store();
-				_GET_NEXT(fd) = next;
+				fdtab[fd].cache.next = next;
 				__ha_barrier_store();
 				goto lock_self;
 			}
@@ -297,22 +287,18 @@
 	 */
 	__ha_barrier_store();
 	if (likely(prev != -1))
-		_GET_NEXT(prev) = next;
+		fdtab[prev].cache.next = next;
 	__ha_barrier_store();
 	if (likely(next != -1))
-		_GET_PREV(next) = prev;
+		fdtab[next].cache.prev = prev;
 	__ha_barrier_store();
 	/* Ok, now we're out of the fd cache */
-	_GET_NEXT(fd) = -(next + 4);
+	fdtab[fd].cache.next = -(next + 4);
 	__ha_barrier_store();
 done:
 	return;
 }
 
-#undef _GET_NEXT
-#undef _GET_PREV
-
-
 /* Removes entry used by fd <fd> from the FD cache and replaces it with the
  * last one.
  * If the fd has no entry assigned, return immediately.