MINOR: fd/threads: make _GET_NEXT()/_GET_PREV() use the volatile attribute

These macros are used either between atomic ops, which make the volatile
implicit, or with an explicit volatile cast at the call site. However, not
having the qualifier in the macro itself leaves some traps in the code,
because certain loop paths cannot be used safely without risking an
infinite loop if one isn't careful enough.
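
As an illustration of the trap, consider a retry loop waiting for another
thread to release the -2 lock marker. This is a minimal sketch using
hypothetical simplified types, not the real fdtab structures:

    struct entry { int next; int prev; };
    extern struct entry tab[];

    int wait_next(int fd)
    {
        int next;
    retry:
        next = tab[fd].next;  /* plain load: may be kept in a register */
        if (next == -2)       /* entry locked by another thread */
            goto retry;       /* can spin forever on the stale value */
        return next;
    }

Reading through a volatile-qualified lvalue instead forces a fresh load
from memory on every iteration, which is what the updated macros guarantee.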

Let's place the volatile attribute inside the macros and remove the
explicit casts from the call sites to avoid this. It was verified that the
output executable remains byte-for-byte identical.
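
For reference, the old call sites forced a fresh load with a GCC statement
expression, while the new macro performs every access through a
volatile-qualified lvalue; both forms compile to the same code, which is
why the binary is unchanged. A minimal sketch of the equivalence, reusing
the hypothetical simplified types from the example above:

    /* old: volatile temporary created at each call site */
    next = ({ volatile int *p = &tab[fd].next; *p; });

    /* new: the cast inside the macro makes the member access itself
     * volatile, so each expansion re-reads memory */
    next = ((volatile struct entry *)&tab[fd])->next;
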
diff --git a/src/fd.c b/src/fd.c
index 6bfefdd..828e140 100644
--- a/src/fd.c
+++ b/src/fd.c
@@ -115,8 +115,8 @@
 
 volatile int ha_used_fds = 0; // Number of FD we're currently using
 
-#define _GET_NEXT(fd, off) ((struct fdlist_entry *)(void *)((char *)(&fdtab[fd]) + off))->next
-#define _GET_PREV(fd, off) ((struct fdlist_entry *)(void *)((char *)(&fdtab[fd]) + off))->prev
+#define _GET_NEXT(fd, off) ((volatile struct fdlist_entry *)(void *)((char *)(&fdtab[fd]) + off))->next
+#define _GET_PREV(fd, off) ((volatile struct fdlist_entry *)(void *)((char *)(&fdtab[fd]) + off))->prev
 /* adds fd <fd> to fd list <list> if it was not yet in it */
 void fd_add_to_fd_list(volatile struct fdlist *list, int fd, int off)
 {
@@ -207,7 +207,7 @@
 
 #else
 lock_self_next:
-	next = ({ volatile int *next = &_GET_NEXT(fd, off); *next; });
+	next = _GET_NEXT(fd, off);
 	if (next == -2)
 		goto lock_self_next;
 	if (next <= -3)
@@ -215,7 +215,7 @@
 	if (unlikely(!_HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2)))
 		goto lock_self_next;
 lock_self_prev:
-	prev = ({ volatile int *prev = &_GET_PREV(fd, off); *prev; });
+	prev = _GET_PREV(fd, off);
 	if (prev == -2)
 		goto lock_self_prev;
 	if (unlikely(!_HA_ATOMIC_CAS(&_GET_PREV(fd, off), &prev, -2)))